/*
 * Block node draining tests
 *
 * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "iothread.h"

static QemuEvent done_event;

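/*
 * Per-node state of the "test" block driver below: drain_count tracks how
 * many drained sections are currently active on the node as seen by the
 * driver callbacks, bh_indirection_ctx lets a read request detour through
 * another AioContext, and sleep_in_drain_begin keeps an artificial request
 * in flight when a drained section begins.
 */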
typedef struct BDRVTestState {
    int drain_count;
    AioContext *bh_indirection_ctx;
    bool sleep_in_drain_begin;
} BDRVTestState;

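/*
 * Runs in its own coroutine when sleep_in_drain_begin is set: the caller has
 * already bumped bs->in_flight, so the artificial request only completes
 * (and drain can finish) once the short sleep below has elapsed.
 */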
static void coroutine_fn sleep_in_drain_begin(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
    bdrv_dec_in_flight(bs);
}

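/*
 * .bdrv_drain_begin/.bdrv_drain_end callbacks of the test driver: they track
 * the drain nesting level in drain_count so the tests can assert exactly
 * when the driver was quiesced.
 */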
static void bdrv_test_drain_begin(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count++;
    if (s->sleep_in_drain_begin) {
        Coroutine *co = qemu_coroutine_create(sleep_in_drain_begin, bs);
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), co);
    }
}

static void bdrv_test_drain_end(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count--;
}

static void bdrv_test_close(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    g_assert_cmpint(s->drain_count, >, 0);
}

static void co_reenter_bh(void *opaque)
{
    aio_co_wake(opaque);
}

static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    BDRVTestState *s = bs->opaque;

    /* We want this request to stay until the polling loop in drain waits for
     * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
     * first and polls its result, too, but it shouldn't accidentally complete
     * this request yet. */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);

    if (s->bh_indirection_ctx) {
        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return 0;
}

static int bdrv_test_change_backing_file(BlockDriverState *bs,
                                         const char *backing_file,
                                         const char *backing_fmt)
{
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = sizeof(BDRVTestState),
    .supports_backing       = true,

    .bdrv_close             = bdrv_test_close,
    .bdrv_co_preadv         = bdrv_test_co_preadv,

    .bdrv_drain_begin       = bdrv_test_drain_begin,
    .bdrv_drain_end         = bdrv_test_drain_end,

    .bdrv_child_perm        = bdrv_default_perms,

    .bdrv_change_backing_file = bdrv_test_change_backing_file,
};

static void aio_ret_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
}

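/*
 * Helper to run a test entry point inside a coroutine: it enters the
 * coroutine and then polls the main AioContext until the entry function has
 * completed.
 */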
typedef struct CallInCoroutineData {
    void (*entry)(void);
    bool done;
} CallInCoroutineData;

static coroutine_fn void call_in_coroutine_entry(void *opaque)
{
    CallInCoroutineData *data = opaque;

    data->entry();
    data->done = true;
}

static void call_in_coroutine(void (*entry)(void))
{
    Coroutine *co;
    CallInCoroutineData data = {
        .entry  = entry,
        .done   = false,
    };

    co = qemu_coroutine_create(call_in_coroutine_entry, &data);
    qemu_coroutine_enter(co);
    while (!data.done) {
        aio_poll(qemu_get_aio_context(), true);
    }
}

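/*
 * The drain variants exercised by these tests. do_drain_begin/end() dispatch
 * to bdrv_drain_all_begin/end() or bdrv_drained_begin/end(); the *_unlocked
 * variants are for callers that do not hold the node's AioContext lock and
 * acquire it around the single-node drain calls.
 */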
enum drain_type {
    BDRV_DRAIN_ALL,
    BDRV_DRAIN,
    DRAIN_TYPE_MAX,
};

static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL:        bdrv_drain_all_begin(); break;
    case BDRV_DRAIN:            bdrv_drained_begin(bs); break;
    default:                    g_assert_not_reached();
    }
}

static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL:        bdrv_drain_all_end(); break;
    case BDRV_DRAIN:            bdrv_drained_end(bs); break;
    default:                    g_assert_not_reached();
    }
}

static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_begin(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

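/*
 * Builds the standard test graph blk -> "test-node" -> "backing" (both nodes
 * using the test driver above) and returns the BlockBackend, which then owns
 * the only remaining references to the nodes.
 */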
static BlockBackend * no_coroutine_fn test_setup(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    bdrv_set_backing_hd(bs, backing, &error_abort);

    bdrv_unref(backing);
    bdrv_unref(bs);

    return blk;
}

static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_end(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

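/*
 * Checks that the driver's drain callbacks fire exactly once per drained
 * section on the top node, and on the backing node only when @recursive is
 * set; the same check is then repeated while an AIO read is in flight.
 */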
static void test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
                               bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;
    BDRVTestState *s, *backing_s;
    BlockAIOCB *acb;
    int aio_ret;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    s = bs->opaque;
    backing_s = backing->opaque;

    /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* Now do the same while a request is pending */
    aio_ret = -EINPROGRESS;
    acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(aio_ret, ==, 0);
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);
}

static void test_drv_cb_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}

static void test_drv_cb_drain(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}

static void coroutine_fn test_drv_cb_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
}

static void test_drv_cb_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_all_entry);
    blk_unref(blk);
}

static void coroutine_fn test_drv_cb_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN, false);
}

static void test_drv_cb_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_entry);
    blk_unref(blk);
}

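/*
 * Same structure as the test_drv_cb_* tests, but checks the generic
 * quiesce_counter on the nodes instead of the driver callbacks. For
 * bdrv_drain_all() the top node is expected to reach a counter of 2, since
 * it is quiesced once for itself and once as the parent of the drained
 * backing node.
 */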
static void test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
                                bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    do_drain_begin(drain_type, bs);

    if (drain_type == BDRV_DRAIN_ALL) {
        g_assert_cmpint(bs->quiesce_counter, ==, 2);
    } else {
        g_assert_cmpint(bs->quiesce_counter, ==, 1);
    }
    g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
}

static void test_quiesce_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}

static void test_quiesce_drain(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}

static void coroutine_fn test_quiesce_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
}

static void test_quiesce_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_all_entry);
    blk_unref(blk);
}

static void coroutine_fn test_quiesce_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN, false);
}

static void test_quiesce_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_entry);
    blk_unref(blk);
}

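/*
 * Nests every combination of bdrv_drain_all() and bdrv_drained_begin() on
 * the same node and checks that the quiesce counters and driver callback
 * counts add up for the outer/inner pair and drop back to zero afterwards.
 */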
static void test_nested(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    enum drain_type outer, inner;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
        for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
            int backing_quiesce = (outer == BDRV_DRAIN_ALL) +
                                  (inner == BDRV_DRAIN_ALL);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);

            do_drain_begin(outer, bs);
            do_drain_begin(inner, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 2 + !!backing_quiesce);
            g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
            g_assert_cmpint(s->drain_count, ==, 1);
            g_assert_cmpint(backing_s->drain_count, ==, !!backing_quiesce);

            do_drain_end(inner, bs);
            do_drain_end(outer, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);
        }
    }

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

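/*
 * Graph changes while bdrv_drain_all() is active: nodes created inside the
 * drained section must come up already drained, and deleting a node must not
 * disturb the drain state of the remaining ones.
 */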
static void test_graph_change_drain_all(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b;
    BDRVTestState *a_s, *b_s;

    /* Create node A with a BlockBackend */
    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);

    /* Call bdrv_drain_all_begin() */
    bdrv_drain_all_begin();

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);

    /* Create node B with a BlockBackend */
    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Unref and finally delete node A */
    blk_unref(blk_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    bdrv_unref(bs_a);

    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* End the drained section */
    bdrv_drain_all_end();

    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);

    bdrv_unref(bs_b);
    blk_unref(blk_b);
}

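/*
 * State shared between test_iothread_common() and the coroutine/BH helpers
 * below; co_done signals that the drain coroutine running in the iothread
 * has finished.
 */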
struct test_iothread_data {
    BlockDriverState *bs;
    enum drain_type drain_type;
    int *aio_ret;
    bool co_done;
};

static void coroutine_fn test_iothread_drain_co_entry(void *opaque)
{
    struct test_iothread_data *data = opaque;

    do_drain_begin(data->drain_type, data->bs);
    g_assert_cmpint(*data->aio_ret, ==, 0);
    do_drain_end(data->drain_type, data->bs);

    data->co_done = true;
    aio_wait_kick();
}

static void test_iothread_aio_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
    qemu_event_set(&done_event);
}

static void test_iothread_main_thread_bh(void *opaque)
{
    struct test_iothread_data *data = opaque;

    /* Test that the AioContext is not yet locked in a random BH that is
     * executed during drain, otherwise this would deadlock. */
    aio_context_acquire(bdrv_get_aio_context(data->bs));
    bdrv_flush(data->bs);
    bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
    aio_context_release(bdrv_get_aio_context(data->bs));
}

/*
 * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
 * The request involves a BH on iothread 2 before it can complete.
 *
 * @drain_thread = 0 means that do_drain_begin/end are called from the main
 * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
 * for this BDS cannot be called from iothread 2 because only the main thread
 * may do cross-AioContext polling.
 */
static void test_iothread_common(enum drain_type drain_type, int drain_thread)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    BDRVTestState *s;
    BlockAIOCB *acb;
    Coroutine *co;
    int aio_ret;
    struct test_iothread_data data;

    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    /* bdrv_drain_all() may only be called from the main loop thread */
    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
        goto out;
    }

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);
    blk_set_disable_request_queuing(blk, true);

    blk_set_aio_context(blk, ctx_a, &error_abort);
    aio_context_acquire(ctx_a);

    s->bh_indirection_ctx = ctx_b;

    aio_ret = -EINPROGRESS;
    qemu_event_reset(&done_event);

    if (drain_thread == 0) {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
    } else {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    }
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    aio_context_release(ctx_a);

    data = (struct test_iothread_data) {
        .bs         = bs,
        .drain_type = drain_type,
        .aio_ret    = &aio_ret,
    };

    switch (drain_thread) {
    case 0:
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        /*
         * Increment in_flight so that do_drain_begin() waits for
         * test_iothread_main_thread_bh(). This prevents the race between
         * test_iothread_main_thread_bh() in IOThread a and do_drain_begin() in
         * this thread. test_iothread_main_thread_bh() decrements in_flight.
         */
        bdrv_inc_in_flight(bs);
        aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);

        /* The request is running on the IOThread a. Draining its block device
         * will make sure that it has completed as far as the BDS is concerned,
         * but the drain in this thread can continue immediately after
         * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
         * later. */
        do_drain_begin(drain_type, bs);
        g_assert_cmpint(bs->in_flight, ==, 0);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        qemu_event_wait(&done_event);
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        g_assert_cmpint(aio_ret, ==, 0);
        do_drain_end(drain_type, bs);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        break;
    case 1:
        co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);
        aio_co_enter(ctx_a, co);
        AIO_WAIT_WHILE_UNLOCKED(NULL, !data.co_done);
        break;
    default:
        g_assert_not_reached();
    }

    aio_context_acquire(ctx_a);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx_a);

    bdrv_unref(bs);
    blk_unref(blk);

out:
    iothread_join(a);
    iothread_join(b);
}

static void test_iothread_drain_all(void)
{
    test_iothread_common(BDRV_DRAIN_ALL, 0);
    test_iothread_common(BDRV_DRAIN_ALL, 1);
}

static void test_iothread_drain(void)
{
    test_iothread_common(BDRV_DRAIN, 0);
    test_iothread_common(BDRV_DRAIN, 1);
}


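/*
 * A minimal block job whose run loop just sleeps until it is asked to
 * complete. prepare/commit/abort call bdrv_flush() to provoke an
 * AIO_WAIT_WHILE() and would deadlock if a drained section got stuck.
 */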
typedef struct TestBlockJob {
    BlockJob common;
    BlockDriverState *bs;
    int run_ret;
    int prepare_ret;
    bool running;
    bool should_complete;
} TestBlockJob;

static int test_job_prepare(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
    return s->prepare_ret;
}

static void test_job_commit(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static void test_job_abort(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* We are running the actual job code past the pause point in
     * job_co_entry(). */
    s->running = true;

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that drain
         * has to wait for this activity to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    return s->run_ret;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
        .commit         = test_job_commit,
        .abort          = test_job_abort,
    },
};

enum test_job_result {
    TEST_JOB_SUCCESS,
    TEST_JOB_FAIL_RUN,
    TEST_JOB_FAIL_PREPARE,
};

enum test_job_drain_node {
    TEST_JOB_DRAIN_SRC,
    TEST_JOB_DRAIN_SRC_CHILD,
};

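/*
 * Builds the graph blk_src -> source-overlay -> source -> source-backing plus
 * a separate target node, starts the test job on it (optionally in an
 * iothread), and checks that job->job.pause_count and the paused/busy flags
 * follow the drained sections around either the source side or the target.
 */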
static void test_blockjob_common_drain_node(enum drain_type drain_type,
                                            bool use_iothread,
                                            enum test_job_result result,
                                            enum test_job_drain_node drain_node)
{
    BlockBackend *blk_src, *blk_target;
    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
    BlockJob *job;
    TestBlockJob *tjob;
    IOThread *iothread = NULL;
    AioContext *ctx;
    int ret;

    src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                               &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
                                       BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
                                       BDRV_O_RDWR, &error_abort);

    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);

    blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_src, src_overlay, &error_abort);

    switch (drain_node) {
    case TEST_JOB_DRAIN_SRC:
        drain_bs = src;
        break;
    case TEST_JOB_DRAIN_SRC_CHILD:
        drain_bs = src_backing;
        break;
    default:
        g_assert_not_reached();
    }

    if (use_iothread) {
        iothread = iothread_new();
        ctx = iothread_get_aio_context(iothread);
        blk_set_aio_context(blk_src, ctx, &error_abort);
    } else {
        ctx = qemu_get_aio_context();
    }

    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);
    blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_target, target, &error_abort);
    blk_set_allow_aio_context_change(blk_target, true);

    aio_context_acquire(ctx);
    tjob = block_job_create("job0", &test_job_driver, NULL, src,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    tjob->bs = src;
    job = &tjob->common;

    bdrv_graph_wrlock(target);
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
    bdrv_graph_wrunlock();

    switch (result) {
    case TEST_JOB_SUCCESS:
        break;
    case TEST_JOB_FAIL_RUN:
        tjob->run_ret = -EIO;
        break;
    case TEST_JOB_FAIL_PREPARE:
        tjob->prepare_ret = -EIO;
        break;
    }
    aio_context_release(ctx);

    job_start(&job->job);

    if (use_iothread) {
        /* job_co_entry() is run in the I/O thread; wait for the actual job
         * code to start (we don't want to catch the job in the pause point in
         * job_co_entry()). */
        while (!tjob->running) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(tjob->running);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, drain_bs);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, drain_bs);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, target);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, target);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    WITH_JOB_LOCK_GUARD() {
        ret = job_complete_sync_locked(&job->job, &error_abort);
    }
    g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

    aio_context_acquire(ctx);
    if (use_iothread) {
        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
        assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
    }
    aio_context_release(ctx);

    blk_unref(blk_src);
    blk_unref(blk_target);
    bdrv_unref(src_overlay);
    bdrv_unref(target);

    if (iothread) {
        iothread_join(iothread);
    }
}

static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
                                 enum test_job_result result)
{
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC);
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC_CHILD);
}

static void test_blockjob_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}


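/*
 * "test_top_driver": a node that simply forwards reads to a designated child
 * (tts->wait_child). It is used by the delete-by-drain tests below, where a
 * drained section on one child causes the parent node to be deleted or to
 * detach all of its children.
 */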
typedef struct BDRVTestTopState {
    BdrvChild *wait_child;
} BDRVTestTopState;

static void bdrv_test_top_close(BlockDriverState *bs)
{
    BdrvChild *c, *next_c;

    bdrv_graph_wrlock(NULL);
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_unref_child(bs, c);
    }
    bdrv_graph_wrunlock();
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_test_top_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVTestTopState *tts = bs->opaque;
    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
}

static BlockDriver bdrv_test_top_driver = {
    .format_name            = "test_top_driver",
    .instance_size          = sizeof(BDRVTestTopState),

    .bdrv_close             = bdrv_test_top_close,
    .bdrv_co_preadv         = bdrv_test_top_co_preadv,

    .bdrv_child_perm        = bdrv_default_perms,
};

typedef struct TestCoDeleteByDrainData {
    BlockBackend *blk;
    bool detach_instead_of_delete;
    bool done;
} TestCoDeleteByDrainData;

static void coroutine_fn test_co_delete_by_drain(void *opaque)
{
    TestCoDeleteByDrainData *dbdd = opaque;
    BlockBackend *blk = dbdd->blk;
    BlockDriverState *bs = blk_bs(blk);
    BDRVTestTopState *tts = bs->opaque;
    void *buffer = g_malloc(65536);
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);

    /* Pretend some internal write operation from parent to child.
     * Important: We have to read from the child, not from the parent!
     * Draining works by first propagating it all up the tree to the
     * root and then waiting for drainage from root to the leaves
     * (protocol nodes).  If we have a request waiting on the root,
     * everything will be drained before we go back down the tree, but
     * we do not want that.  We want to be in the middle of draining
     * when the following request returns. */
103087f130bdSKevin Wolf     bdrv_graph_co_rdlock();
1031da668aa1SThomas Huth     bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
103287f130bdSKevin Wolf     bdrv_graph_co_rdunlock();
1033da668aa1SThomas Huth 
1034da668aa1SThomas Huth     g_assert_cmpint(bs->refcnt, ==, 1);
1035da668aa1SThomas Huth 
1036da668aa1SThomas Huth     if (!dbdd->detach_instead_of_delete) {
103701a10c24SKevin Wolf         blk_co_unref(blk);
1038da668aa1SThomas Huth     } else {
1039da668aa1SThomas Huth         BdrvChild *c, *next_c;
1040680e0cc4SKevin Wolf         bdrv_graph_co_rdlock();
1041da668aa1SThomas Huth         QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
1042680e0cc4SKevin Wolf             bdrv_graph_co_rdunlock();
104332a8aba3SKevin Wolf             bdrv_co_unref_child(bs, c);
1044680e0cc4SKevin Wolf             bdrv_graph_co_rdlock();
1045da668aa1SThomas Huth         }
1046680e0cc4SKevin Wolf         bdrv_graph_co_rdunlock();
1047da668aa1SThomas Huth     }
1048da668aa1SThomas Huth 
1049da668aa1SThomas Huth     dbdd->done = true;
1050da668aa1SThomas Huth     g_free(buffer);
1051da668aa1SThomas Huth }
1052da668aa1SThomas Huth 
1053da668aa1SThomas Huth /**
1054da668aa1SThomas Huth  * Test what happens when some BDS has some children, you drain one of
1055da668aa1SThomas Huth  * them, and this results in the BDS being deleted.
1056da668aa1SThomas Huth  *
1057da668aa1SThomas Huth  * If @detach_instead_of_delete is set, the BDS is not going to be
1058da668aa1SThomas Huth  * deleted but will only detach all of its children.
1059da668aa1SThomas Huth  */
1060da668aa1SThomas Huth static void do_test_delete_by_drain(bool detach_instead_of_delete,
1061da668aa1SThomas Huth                                     enum drain_type drain_type)
1062da668aa1SThomas Huth {
1063da668aa1SThomas Huth     BlockBackend *blk;
1064da668aa1SThomas Huth     BlockDriverState *bs, *child_bs, *null_bs;
1065da668aa1SThomas Huth     BDRVTestTopState *tts;
1066da668aa1SThomas Huth     TestCoDeleteByDrainData dbdd;
1067da668aa1SThomas Huth     Coroutine *co;
1068da668aa1SThomas Huth 
1069da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
1070da668aa1SThomas Huth                               &error_abort);
1071da668aa1SThomas Huth     bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1072da668aa1SThomas Huth     tts = bs->opaque;
1073da668aa1SThomas Huth 
1074da668aa1SThomas Huth     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1075da668aa1SThomas Huth                         &error_abort);
1076afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1077da668aa1SThomas Huth     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
1078da668aa1SThomas Huth                       BDRV_CHILD_DATA, &error_abort);
1079afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1080da668aa1SThomas Huth 
1081da668aa1SThomas Huth     /* This child will be the one to pass requests through to, and
1082da668aa1SThomas Huth      * it will stall until a drain occurs */
1083da668aa1SThomas Huth     child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
1084da668aa1SThomas Huth                                     &error_abort);
1085da668aa1SThomas Huth     child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1086da668aa1SThomas Huth     /* Takes our reference to child_bs */
1087afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1088da668aa1SThomas Huth     tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
1089da668aa1SThomas Huth                                         &child_of_bds,
1090da668aa1SThomas Huth                                         BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
1091da668aa1SThomas Huth                                         &error_abort);
1092afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1093da668aa1SThomas Huth 
1094da668aa1SThomas Huth     /* This child is just there to be deleted
1095da668aa1SThomas Huth      * (for detach_instead_of_delete == true) */
1096da668aa1SThomas Huth     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1097da668aa1SThomas Huth                         &error_abort);
1098afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1099da668aa1SThomas Huth     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
1100da668aa1SThomas Huth                       &error_abort);
1101afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1102da668aa1SThomas Huth 
1103da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1104da668aa1SThomas Huth     blk_insert_bs(blk, bs, &error_abort);
1105da668aa1SThomas Huth 
1106da668aa1SThomas Huth     /* Referenced by blk now */
1107da668aa1SThomas Huth     bdrv_unref(bs);
1108da668aa1SThomas Huth 
1109da668aa1SThomas Huth     g_assert_cmpint(bs->refcnt, ==, 1);
1110da668aa1SThomas Huth     g_assert_cmpint(child_bs->refcnt, ==, 1);
1111da668aa1SThomas Huth     g_assert_cmpint(null_bs->refcnt, ==, 1);
1112da668aa1SThomas Huth 
1113da668aa1SThomas Huth 
1114da668aa1SThomas Huth     dbdd = (TestCoDeleteByDrainData){
1115da668aa1SThomas Huth         .blk = blk,
1116da668aa1SThomas Huth         .detach_instead_of_delete = detach_instead_of_delete,
1117da668aa1SThomas Huth         .done = false,
1118da668aa1SThomas Huth     };
1119da668aa1SThomas Huth     co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
1120da668aa1SThomas Huth     qemu_coroutine_enter(co);
1121da668aa1SThomas Huth 
1122da668aa1SThomas Huth     /* Drain the child while the read operation is still pending.
1123da668aa1SThomas Huth      * This should result in the operation finishing and
1124da668aa1SThomas Huth      * test_co_delete_by_drain() resuming.  Thus, @bs will be deleted
1125da668aa1SThomas Huth      * and the coroutine will exit while this drain operation is still
1126da668aa1SThomas Huth      * in progress. */
1127da668aa1SThomas Huth     switch (drain_type) {
1128da668aa1SThomas Huth     case BDRV_DRAIN:
1129da668aa1SThomas Huth         bdrv_ref(child_bs);
1130da668aa1SThomas Huth         bdrv_drain(child_bs);
1131da668aa1SThomas Huth         bdrv_unref(child_bs);
1132da668aa1SThomas Huth         break;
1133da668aa1SThomas Huth     case BDRV_DRAIN_ALL:
1134da668aa1SThomas Huth         bdrv_drain_all_begin();
1135da668aa1SThomas Huth         bdrv_drain_all_end();
1136da668aa1SThomas Huth         break;
1137da668aa1SThomas Huth     default:
1138da668aa1SThomas Huth         g_assert_not_reached();
1139da668aa1SThomas Huth     }
1140da668aa1SThomas Huth 
1141da668aa1SThomas Huth     while (!dbdd.done) {
1142da668aa1SThomas Huth         aio_poll(qemu_get_aio_context(), true);
1143da668aa1SThomas Huth     }
1144da668aa1SThomas Huth 
1145da668aa1SThomas Huth     if (detach_instead_of_delete) {
1146da668aa1SThomas Huth         /* Here, the reference has not passed over to the coroutine,
1147da668aa1SThomas Huth          * so we have to delete the BB ourselves */
1148da668aa1SThomas Huth         blk_unref(blk);
1149da668aa1SThomas Huth     }
1150da668aa1SThomas Huth }
1151da668aa1SThomas Huth 
1152da668aa1SThomas Huth static void test_delete_by_drain(void)
1153da668aa1SThomas Huth {
1154da668aa1SThomas Huth     do_test_delete_by_drain(false, BDRV_DRAIN);
1155da668aa1SThomas Huth }
1156da668aa1SThomas Huth 
1157da668aa1SThomas Huth static void test_detach_by_drain_all(void)
1158da668aa1SThomas Huth {
1159da668aa1SThomas Huth     do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
1160da668aa1SThomas Huth }
1161da668aa1SThomas Huth 
1162da668aa1SThomas Huth static void test_detach_by_drain(void)
1163da668aa1SThomas Huth {
1164da668aa1SThomas Huth     do_test_delete_by_drain(true, BDRV_DRAIN);
1165da668aa1SThomas Huth }
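
/*
 * Illustrative sketch, not part of the original test matrix and not
 * registered in main(): the minimal drained-section pattern that the
 * delete/detach tests above build on.  Assumes a plain bdrv_test node
 * with no requests in flight.
 */
static void G_GNUC_UNUSED example_minimal_drained_section(void)
{
    BlockDriverState *bs;

    bs = bdrv_new_open_driver(&bdrv_test, "drain-example", BDRV_O_RDWR,
                              &error_abort);

    /* Entering the drained section bumps the quiesce counter */
    bdrv_drained_begin(bs);
    g_assert_cmpint(bs->quiesce_counter, ==, 1);

    /* Leaving it drops the counter back to zero */
    bdrv_drained_end(bs);
    g_assert_cmpint(bs->quiesce_counter, ==, 0);

    bdrv_unref(bs);
}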
1166da668aa1SThomas Huth 
1167da668aa1SThomas Huth 
1168da668aa1SThomas Huth struct detach_by_parent_data {
1169da668aa1SThomas Huth     BlockDriverState *parent_b;
1170da668aa1SThomas Huth     BdrvChild *child_b;
1171da668aa1SThomas Huth     BlockDriverState *c;
1172da668aa1SThomas Huth     BdrvChild *child_c;
1173da668aa1SThomas Huth     bool by_parent_cb;
1174617f3a96SKevin Wolf     bool detach_on_drain;
1175da668aa1SThomas Huth };
1176da668aa1SThomas Huth static struct detach_by_parent_data detach_by_parent_data;
1177da668aa1SThomas Huth 
1178903df115SKevin Wolf static void no_coroutine_fn detach_indirect_bh(void *opaque)
1179da668aa1SThomas Huth {
1180da668aa1SThomas Huth     struct detach_by_parent_data *data = opaque;
1181da668aa1SThomas Huth 
1182617f3a96SKevin Wolf     bdrv_dec_in_flight(data->child_b->bs);
118332a8aba3SKevin Wolf 
118432a8aba3SKevin Wolf     bdrv_graph_wrlock(NULL);
1185da668aa1SThomas Huth     bdrv_unref_child(data->parent_b, data->child_b);
1186da668aa1SThomas Huth 
1187da668aa1SThomas Huth     bdrv_ref(data->c);
1188da668aa1SThomas Huth     data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
1189da668aa1SThomas Huth                                       &child_of_bds, BDRV_CHILD_DATA,
1190da668aa1SThomas Huth                                       &error_abort);
1191afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1192da668aa1SThomas Huth }
1193da668aa1SThomas Huth 
1194903df115SKevin Wolf static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
1195da668aa1SThomas Huth {
1196da668aa1SThomas Huth     struct detach_by_parent_data *data = &detach_by_parent_data;
1197da668aa1SThomas Huth 
1198da668aa1SThomas Huth     g_assert_cmpint(ret, ==, 0);
1199da668aa1SThomas Huth     if (data->by_parent_cb) {
1200617f3a96SKevin Wolf         bdrv_inc_in_flight(data->child_b->bs);
1201903df115SKevin Wolf         aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
1202903df115SKevin Wolf                                 detach_indirect_bh, &detach_by_parent_data);
1203da668aa1SThomas Huth     }
1204da668aa1SThomas Huth }
1205da668aa1SThomas Huth 
1206d05ab380SEmanuele Giuseppe Esposito static void GRAPH_RDLOCK detach_by_driver_cb_drained_begin(BdrvChild *child)
1207da668aa1SThomas Huth {
1208617f3a96SKevin Wolf     struct detach_by_parent_data *data = &detach_by_parent_data;
1209617f3a96SKevin Wolf 
1210617f3a96SKevin Wolf     if (!data->detach_on_drain) {
1211617f3a96SKevin Wolf         return;
1212617f3a96SKevin Wolf     }
1213617f3a96SKevin Wolf     data->detach_on_drain = false;
1214617f3a96SKevin Wolf 
1215617f3a96SKevin Wolf     bdrv_inc_in_flight(data->child_b->bs);
1216da668aa1SThomas Huth     aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
1217da668aa1SThomas Huth                             detach_indirect_bh, &detach_by_parent_data);
1218da668aa1SThomas Huth     child_of_bds.drained_begin(child);
1219da668aa1SThomas Huth }
1220da668aa1SThomas Huth 
1221da668aa1SThomas Huth static BdrvChildClass detach_by_driver_cb_class;
1222da668aa1SThomas Huth 
1223da668aa1SThomas Huth /*
1224da668aa1SThomas Huth  * Initial graph:
1225da668aa1SThomas Huth  *
1226da668aa1SThomas Huth  * PA     PB
1227da668aa1SThomas Huth  *    \ /   \
1228da668aa1SThomas Huth  *     A     B     C
1229da668aa1SThomas Huth  *
1230da668aa1SThomas Huth  * by_parent_cb == true:  Test that parent callbacks don't poll
1231da668aa1SThomas Huth  *
1232da668aa1SThomas Huth  *     PA has a pending write request whose callback changes the child nodes of
1233da668aa1SThomas Huth  *     PB: It removes B and adds C instead. The subtree of PB is drained, which
1234da668aa1SThomas Huth  *     will indirectly drain the write request, too.
1235da668aa1SThomas Huth  *
1236da668aa1SThomas Huth  * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll
1237da668aa1SThomas Huth  *
1238da668aa1SThomas Huth  *     PA's BdrvChildClass has a .drained_begin callback that schedules a BH
1239da668aa1SThomas Huth  *     that does the same graph change. If bdrv_drain_invoke() calls it, the
1240da668aa1SThomas Huth  *     state is messed up, but if it is only polled in the single
1241da668aa1SThomas Huth  *     BDRV_POLL_WHILE() at the end of the drain, this should work fine.
1242da668aa1SThomas Huth  */
1243d05ab380SEmanuele Giuseppe Esposito static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
1244da668aa1SThomas Huth {
1245da668aa1SThomas Huth     BlockBackend *blk;
1246da668aa1SThomas Huth     BlockDriverState *parent_a, *parent_b, *a, *b, *c;
1247da668aa1SThomas Huth     BdrvChild *child_a, *child_b;
1248da668aa1SThomas Huth     BlockAIOCB *acb;
1249da668aa1SThomas Huth 
1250da668aa1SThomas Huth     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
1251da668aa1SThomas Huth 
1252da668aa1SThomas Huth     if (!by_parent_cb) {
1253da668aa1SThomas Huth         detach_by_driver_cb_class = child_of_bds;
1254da668aa1SThomas Huth         detach_by_driver_cb_class.drained_begin =
1255da668aa1SThomas Huth             detach_by_driver_cb_drained_begin;
1256617f3a96SKevin Wolf         detach_by_driver_cb_class.drained_end = NULL;
1257617f3a96SKevin Wolf         detach_by_driver_cb_class.drained_poll = NULL;
1258da668aa1SThomas Huth     }
1259da668aa1SThomas Huth 
1260617f3a96SKevin Wolf     detach_by_parent_data = (struct detach_by_parent_data) {
1261617f3a96SKevin Wolf         .detach_on_drain = false,
1262617f3a96SKevin Wolf     };
1263617f3a96SKevin Wolf 
1264da668aa1SThomas Huth     /* Create all involved nodes */
1265da668aa1SThomas Huth     parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
1266da668aa1SThomas Huth                                     &error_abort);
1267da668aa1SThomas Huth     parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
1268da668aa1SThomas Huth                                     &error_abort);
1269da668aa1SThomas Huth 
1270da668aa1SThomas Huth     a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
1271da668aa1SThomas Huth     b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
1272da668aa1SThomas Huth     c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);
1273da668aa1SThomas Huth 
1274da668aa1SThomas Huth     /* blk is a BB for parent-a */
1275da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1276da668aa1SThomas Huth     blk_insert_bs(blk, parent_a, &error_abort);
1277da668aa1SThomas Huth     bdrv_unref(parent_a);
1278da668aa1SThomas Huth 
1279da668aa1SThomas Huth     /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver
1280da668aa1SThomas Huth      * callback must not return immediately. */
1281da668aa1SThomas Huth     if (!by_parent_cb) {
1282da668aa1SThomas Huth         BDRVTestState *s = parent_a->opaque;
1283da668aa1SThomas Huth         s->sleep_in_drain_begin = true;
1284da668aa1SThomas Huth     }
1285da668aa1SThomas Huth 
1286da668aa1SThomas Huth     /* Set child relationships */
1287da668aa1SThomas Huth     bdrv_ref(b);
1288da668aa1SThomas Huth     bdrv_ref(a);
1289afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1290da668aa1SThomas Huth     child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
1291da668aa1SThomas Huth                                 BDRV_CHILD_DATA, &error_abort);
1292da668aa1SThomas Huth     child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
1293da668aa1SThomas Huth                                 BDRV_CHILD_COW, &error_abort);
1294da668aa1SThomas Huth 
1295da668aa1SThomas Huth     bdrv_ref(a);
1296da668aa1SThomas Huth     bdrv_attach_child(parent_a, a, "PA-A",
1297da668aa1SThomas Huth                       by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
1298da668aa1SThomas Huth                       BDRV_CHILD_DATA, &error_abort);
1299afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1300da668aa1SThomas Huth 
1301da668aa1SThomas Huth     g_assert_cmpint(parent_a->refcnt, ==, 1);
1302da668aa1SThomas Huth     g_assert_cmpint(parent_b->refcnt, ==, 1);
1303da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 3);
1304da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 2);
1305da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 1);
1306da668aa1SThomas Huth 
1307da668aa1SThomas Huth     g_assert(QLIST_FIRST(&parent_b->children) == child_a);
1308da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_a, next) == child_b);
1309da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_b, next) == NULL);
1310da668aa1SThomas Huth 
1311da668aa1SThomas Huth     /* Start the evil write request */
1312da668aa1SThomas Huth     detach_by_parent_data = (struct detach_by_parent_data) {
1313da668aa1SThomas Huth         .parent_b = parent_b,
1314da668aa1SThomas Huth         .child_b = child_b,
1315da668aa1SThomas Huth         .c = c,
1316da668aa1SThomas Huth         .by_parent_cb = by_parent_cb,
1317617f3a96SKevin Wolf         .detach_on_drain = true,
1318da668aa1SThomas Huth     };
1319da668aa1SThomas Huth     acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
1320da668aa1SThomas Huth     g_assert(acb != NULL);
1321da668aa1SThomas Huth 
1322da668aa1SThomas Huth     /* Drain and check the expected result */
1323299403aeSKevin Wolf     bdrv_drained_begin(parent_b);
1324299403aeSKevin Wolf     bdrv_drained_begin(a);
1325299403aeSKevin Wolf     bdrv_drained_begin(b);
1326299403aeSKevin Wolf     bdrv_drained_begin(c);
1327da668aa1SThomas Huth 
1328da668aa1SThomas Huth     g_assert(detach_by_parent_data.child_c != NULL);
1329da668aa1SThomas Huth 
1330da668aa1SThomas Huth     g_assert_cmpint(parent_a->refcnt, ==, 1);
1331da668aa1SThomas Huth     g_assert_cmpint(parent_b->refcnt, ==, 1);
1332da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 3);
1333da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 1);
1334da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 2);
1335da668aa1SThomas Huth 
1336da668aa1SThomas Huth     g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
1337da668aa1SThomas Huth     g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
1338da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_a, next) == NULL);
1339da668aa1SThomas Huth 
1340da668aa1SThomas Huth     g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
1341299403aeSKevin Wolf     g_assert_cmpint(parent_b->quiesce_counter, ==, 3);
1342da668aa1SThomas Huth     g_assert_cmpint(a->quiesce_counter, ==, 1);
1343299403aeSKevin Wolf     g_assert_cmpint(b->quiesce_counter, ==, 1);
1344da668aa1SThomas Huth     g_assert_cmpint(c->quiesce_counter, ==, 1);
1345da668aa1SThomas Huth 
1346299403aeSKevin Wolf     bdrv_drained_end(parent_b);
1347299403aeSKevin Wolf     bdrv_drained_end(a);
1348299403aeSKevin Wolf     bdrv_drained_end(b);
1349299403aeSKevin Wolf     bdrv_drained_end(c);
1350da668aa1SThomas Huth 
1351da668aa1SThomas Huth     bdrv_unref(parent_b);
1352da668aa1SThomas Huth     blk_unref(blk);
1353da668aa1SThomas Huth 
1354da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 1);
1355da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 1);
1356da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 1);
1357da668aa1SThomas Huth     bdrv_unref(a);
1358da668aa1SThomas Huth     bdrv_unref(b);
1359da668aa1SThomas Huth     bdrv_unref(c);
1360da668aa1SThomas Huth }
1361da668aa1SThomas Huth 
1362da668aa1SThomas Huth static void test_detach_by_parent_cb(void)
1363da668aa1SThomas Huth {
1364da668aa1SThomas Huth     test_detach_indirect(true);
1365da668aa1SThomas Huth }
1366da668aa1SThomas Huth 
1367da668aa1SThomas Huth static void test_detach_by_driver_cb(void)
1368da668aa1SThomas Huth {
1369da668aa1SThomas Huth     test_detach_indirect(false);
1370da668aa1SThomas Huth }
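
/*
 * Illustrative sketch (not a registered test; node names are made up for
 * this example): the write-locked pattern used throughout this file
 * whenever a child is attached outside of a coroutine, as the detach
 * tests above do before starting their evil requests.
 */
static void G_GNUC_UNUSED example_attach_child_locked(void)
{
    BlockDriverState *parent, *child;

    parent = bdrv_new_open_driver(&bdrv_test, "lock-example-parent",
                                  BDRV_O_RDWR, &error_abort);
    child = bdrv_new_open_driver(&bdrv_test, "lock-example-child",
                                 BDRV_O_RDWR, &error_abort);

    /* Graph changes require the writer lock; attaching takes our
     * reference to child */
    bdrv_graph_wrlock(NULL);
    bdrv_attach_child(parent, child, "example-child", &child_of_bds,
                      BDRV_CHILD_DATA, &error_abort);
    bdrv_graph_wrunlock();

    /* Deleting the parent detaches and deletes the child as well */
    bdrv_unref(parent);
}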
1371da668aa1SThomas Huth 
1372da668aa1SThomas Huth static void test_append_to_drained(void)
1373da668aa1SThomas Huth {
1374da668aa1SThomas Huth     BlockBackend *blk;
1375da668aa1SThomas Huth     BlockDriverState *base, *overlay;
1376da668aa1SThomas Huth     BDRVTestState *base_s, *overlay_s;
1377da668aa1SThomas Huth 
1378da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1379da668aa1SThomas Huth     base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
1380da668aa1SThomas Huth     base_s = base->opaque;
1381da668aa1SThomas Huth     blk_insert_bs(blk, base, &error_abort);
1382da668aa1SThomas Huth 
1383da668aa1SThomas Huth     overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR,
1384da668aa1SThomas Huth                                    &error_abort);
1385da668aa1SThomas Huth     overlay_s = overlay->opaque;
1386da668aa1SThomas Huth 
1387da668aa1SThomas Huth     do_drain_begin(BDRV_DRAIN, base);
1388da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 1);
1389da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 1);
1390da668aa1SThomas Huth     g_assert_cmpint(base->in_flight, ==, 0);
1391da668aa1SThomas Huth 
1392487b9187SKevin Wolf     aio_context_acquire(qemu_get_aio_context());
1393da668aa1SThomas Huth     bdrv_append(overlay, base, &error_abort);
1394487b9187SKevin Wolf     aio_context_release(qemu_get_aio_context());
1395487b9187SKevin Wolf 
1396da668aa1SThomas Huth     g_assert_cmpint(base->in_flight, ==, 0);
1397da668aa1SThomas Huth     g_assert_cmpint(overlay->in_flight, ==, 0);
1398da668aa1SThomas Huth 
1399da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 1);
1400da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 1);
1401da668aa1SThomas Huth     g_assert_cmpint(overlay->quiesce_counter, ==, 1);
1402da668aa1SThomas Huth     g_assert_cmpint(overlay_s->drain_count, ==, 1);
1403da668aa1SThomas Huth 
1404da668aa1SThomas Huth     do_drain_end(BDRV_DRAIN, base);
1405da668aa1SThomas Huth 
1406da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 0);
1407da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 0);
1408da668aa1SThomas Huth     g_assert_cmpint(overlay->quiesce_counter, ==, 0);
1409da668aa1SThomas Huth     g_assert_cmpint(overlay_s->drain_count, ==, 0);
1410da668aa1SThomas Huth 
1411ae9d4417SVladimir Sementsov-Ogievskiy     bdrv_unref(overlay);
1412da668aa1SThomas Huth     bdrv_unref(base);
1413da668aa1SThomas Huth     blk_unref(blk);
1414da668aa1SThomas Huth }
1415da668aa1SThomas Huth 
1416da668aa1SThomas Huth static void test_set_aio_context(void)
1417da668aa1SThomas Huth {
1418da668aa1SThomas Huth     BlockDriverState *bs;
1419da668aa1SThomas Huth     IOThread *a = iothread_new();
1420da668aa1SThomas Huth     IOThread *b = iothread_new();
1421da668aa1SThomas Huth     AioContext *ctx_a = iothread_get_aio_context(a);
1422da668aa1SThomas Huth     AioContext *ctx_b = iothread_get_aio_context(b);
1423da668aa1SThomas Huth 
1424da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
1425da668aa1SThomas Huth                               &error_abort);
1426da668aa1SThomas Huth 
1427da668aa1SThomas Huth     bdrv_drained_begin(bs);
1428142e6907SEmanuele Giuseppe Esposito     bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);
1429da668aa1SThomas Huth 
1430da668aa1SThomas Huth     aio_context_acquire(ctx_a);
1431da668aa1SThomas Huth     bdrv_drained_end(bs);
1432da668aa1SThomas Huth 
1433da668aa1SThomas Huth     bdrv_drained_begin(bs);
1434142e6907SEmanuele Giuseppe Esposito     bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
1435da668aa1SThomas Huth     aio_context_release(ctx_a);
1436da668aa1SThomas Huth     aio_context_acquire(ctx_b);
1437142e6907SEmanuele Giuseppe Esposito     bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
1438da668aa1SThomas Huth     aio_context_release(ctx_b);
1439da668aa1SThomas Huth     bdrv_drained_end(bs);
1440da668aa1SThomas Huth 
1441da668aa1SThomas Huth     bdrv_unref(bs);
1442da668aa1SThomas Huth     iothread_join(a);
1443da668aa1SThomas Huth     iothread_join(b);
1444da668aa1SThomas Huth }
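
/*
 * Illustrative sketch (not a registered test): a freshly opened node
 * starts out in the main-loop AioContext, which is the context that
 * test_set_aio_context() above moves it away from and back to.
 */
static void G_GNUC_UNUSED example_default_aio_context(void)
{
    BlockDriverState *bs;

    bs = bdrv_new_open_driver(&bdrv_test, "default-ctx-node", BDRV_O_RDWR,
                              &error_abort);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());
    bdrv_unref(bs);
}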
1445da668aa1SThomas Huth 
1446da668aa1SThomas Huth 
1447da668aa1SThomas Huth typedef struct TestDropBackingBlockJob {
1448da668aa1SThomas Huth     BlockJob common;
1449da668aa1SThomas Huth     bool should_complete;
1450da668aa1SThomas Huth     bool *did_complete;
1451da668aa1SThomas Huth     BlockDriverState *detach_also;
14521b177bbeSVladimir Sementsov-Ogievskiy     BlockDriverState *bs;
1453da668aa1SThomas Huth } TestDropBackingBlockJob;
1454da668aa1SThomas Huth 
1455da668aa1SThomas Huth static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
1456da668aa1SThomas Huth {
1457da668aa1SThomas Huth     TestDropBackingBlockJob *s =
1458da668aa1SThomas Huth         container_of(job, TestDropBackingBlockJob, common.job);
1459da668aa1SThomas Huth 
1460da668aa1SThomas Huth     while (!s->should_complete) {
1461da668aa1SThomas Huth         job_sleep_ns(job, 0);
1462da668aa1SThomas Huth     }
1463da668aa1SThomas Huth 
1464da668aa1SThomas Huth     return 0;
1465da668aa1SThomas Huth }
1466da668aa1SThomas Huth 
1467da668aa1SThomas Huth static void test_drop_backing_job_commit(Job *job)
1468da668aa1SThomas Huth {
1469da668aa1SThomas Huth     TestDropBackingBlockJob *s =
1470da668aa1SThomas Huth         container_of(job, TestDropBackingBlockJob, common.job);
1471da668aa1SThomas Huth 
14721b177bbeSVladimir Sementsov-Ogievskiy     bdrv_set_backing_hd(s->bs, NULL, &error_abort);
1473da668aa1SThomas Huth     bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);
1474da668aa1SThomas Huth 
1475da668aa1SThomas Huth     *s->did_complete = true;
1476da668aa1SThomas Huth }
1477da668aa1SThomas Huth 
1478da668aa1SThomas Huth static const BlockJobDriver test_drop_backing_job_driver = {
1479da668aa1SThomas Huth     .job_driver = {
1480da668aa1SThomas Huth         .instance_size  = sizeof(TestDropBackingBlockJob),
1481da668aa1SThomas Huth         .free           = block_job_free,
1482da668aa1SThomas Huth         .user_resume    = block_job_user_resume,
1483da668aa1SThomas Huth         .run            = test_drop_backing_job_run,
1484da668aa1SThomas Huth         .commit         = test_drop_backing_job_commit,
1485da668aa1SThomas Huth     }
1486da668aa1SThomas Huth };
1487da668aa1SThomas Huth 
1488da668aa1SThomas Huth /**
1489da668aa1SThomas Huth  * Creates a child node with three parent nodes on it, and then runs a
1490da668aa1SThomas Huth  * block job on the final one, parent-node-2.
1491da668aa1SThomas Huth  *
1492da668aa1SThomas Huth  * The job is then asked to complete before a section where the child
1493da668aa1SThomas Huth  * is drained.
1494da668aa1SThomas Huth  *
1495da668aa1SThomas Huth  * Ending this section will undrain the child's parents, first
1496da668aa1SThomas Huth  * parent-node-2, then parent-node-1, then parent-node-0 -- the parent
1497da668aa1SThomas Huth  * list is in reverse order of how they were added.  Ending the drain
1498da668aa1SThomas Huth  * on parent-node-2 will resume the job, thus completing it and
1499da668aa1SThomas Huth  * scheduling job_exit().
1500da668aa1SThomas Huth  *
1501da668aa1SThomas Huth  * Ending the drain on parent-node-1 will poll the AioContext, which
1502da668aa1SThomas Huth  * lets job_exit() and thus test_drop_backing_job_commit() run.  That
1503da668aa1SThomas Huth  * function first removes the child as parent-node-2's backing file.
1504da668aa1SThomas Huth  *
1505da668aa1SThomas Huth  * In old (and buggy) implementations, there are two problems with
1506da668aa1SThomas Huth  * that:
1507da668aa1SThomas Huth  * (A) bdrv_drain_invoke() polls for every node that leaves the
1508da668aa1SThomas Huth  *     drained section.  This means that job_exit() is scheduled
1509da668aa1SThomas Huth  *     before the child has left the drained section.  Its
1510da668aa1SThomas Huth  *     quiesce_counter is therefore still 1 when it is removed from
1511da668aa1SThomas Huth  *     parent-node-2.
1512da668aa1SThomas Huth  *
1513da668aa1SThomas Huth  * (B) bdrv_replace_child_noperm() calls drained_end() on the old
1514da668aa1SThomas Huth  *     child's parents as many times as the child is quiesced.  This
1515da668aa1SThomas Huth  *     means it will call drained_end() on parent-node-2 once.
1516da668aa1SThomas Huth  *     Because parent-node-2 is no longer quiesced at this point, this
1517da668aa1SThomas Huth  *     will fail.
1518da668aa1SThomas Huth  *
1519da668aa1SThomas Huth  * bdrv_replace_child_noperm() therefore must call drained_end() on
1520da668aa1SThomas Huth  * the parent only if it really is still drained because the child is
1521da668aa1SThomas Huth  * drained.
1522da668aa1SThomas Huth  *
1523da668aa1SThomas Huth  * If removing child from parent-node-2 was successful (as it should
1524da668aa1SThomas Huth  * be), test_drop_backing_job_commit() will then also remove the child
1525da668aa1SThomas Huth  * from parent-node-0.
1526da668aa1SThomas Huth  *
1527da668aa1SThomas Huth  * With an old version of our drain infrastructure ((A) above), that
1528da668aa1SThomas Huth  * resulted in the following flow:
1529da668aa1SThomas Huth  *
1530da668aa1SThomas Huth  * 1. child attempts to leave its drained section.  The call recurses
1531da668aa1SThomas Huth  *    to its parents.
1532da668aa1SThomas Huth  *
1533da668aa1SThomas Huth  * 2. parent-node-2 leaves the drained section.  Polling in
1534da668aa1SThomas Huth  *    bdrv_drain_invoke() will schedule job_exit().
1535da668aa1SThomas Huth  *
1536da668aa1SThomas Huth  * 3. parent-node-1 leaves the drained section.  Polling in
1537da668aa1SThomas Huth  *    bdrv_drain_invoke() will run job_exit(), thus disconnecting
1538da668aa1SThomas Huth  *    parent-node-0 from the child node.
1539da668aa1SThomas Huth  *
1540da668aa1SThomas Huth  * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to
1541da668aa1SThomas Huth  *    iterate over the parents.  Thus, it now accesses the BdrvChild
1542da668aa1SThomas Huth  *    object that used to connect parent-node-0 and the child node.
1543da668aa1SThomas Huth  *    However, that object no longer exists, so it accesses a dangling
1544da668aa1SThomas Huth  *    pointer.
1545da668aa1SThomas Huth  *
1546da668aa1SThomas Huth  * The solution is to only poll once when running a bdrv_drained_end()
1547da668aa1SThomas Huth  * operation, specifically at the end when all drained_end()
1548da668aa1SThomas Huth  * operations for all involved nodes have been scheduled.
1549da668aa1SThomas Huth  * Note that this also solves (A) above, thus hiding (B).
1550da668aa1SThomas Huth  */
1551da668aa1SThomas Huth static void test_blockjob_commit_by_drained_end(void)
1552da668aa1SThomas Huth {
1553da668aa1SThomas Huth     BlockDriverState *bs_child, *bs_parents[3];
1554da668aa1SThomas Huth     TestDropBackingBlockJob *job;
1555da668aa1SThomas Huth     bool job_has_completed = false;
1556da668aa1SThomas Huth     int i;
1557da668aa1SThomas Huth 
1558da668aa1SThomas Huth     bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR,
1559da668aa1SThomas Huth                                     &error_abort);
1560da668aa1SThomas Huth 
1561da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1562da668aa1SThomas Huth         char name[32];
1563da668aa1SThomas Huth         snprintf(name, sizeof(name), "parent-node-%i", i);
1564da668aa1SThomas Huth         bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
1565da668aa1SThomas Huth                                              &error_abort);
1566da668aa1SThomas Huth         bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort);
1567da668aa1SThomas Huth     }
1568da668aa1SThomas Huth 
1569da668aa1SThomas Huth     job = block_job_create("job", &test_drop_backing_job_driver, NULL,
1570da668aa1SThomas Huth                            bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
1571da668aa1SThomas Huth                            &error_abort);
15721b177bbeSVladimir Sementsov-Ogievskiy     job->bs = bs_parents[2];
1573da668aa1SThomas Huth 
1574da668aa1SThomas Huth     job->detach_also = bs_parents[0];
1575da668aa1SThomas Huth     job->did_complete = &job_has_completed;
1576da668aa1SThomas Huth 
1577da668aa1SThomas Huth     job_start(&job->common.job);
1578da668aa1SThomas Huth 
1579da668aa1SThomas Huth     job->should_complete = true;
1580da668aa1SThomas Huth     bdrv_drained_begin(bs_child);
1581da668aa1SThomas Huth     g_assert(!job_has_completed);
1582da668aa1SThomas Huth     bdrv_drained_end(bs_child);
15835e8ac217SKevin Wolf     aio_poll(qemu_get_aio_context(), false);
1584da668aa1SThomas Huth     g_assert(job_has_completed);
1585da668aa1SThomas Huth 
1586da668aa1SThomas Huth     bdrv_unref(bs_parents[0]);
1587da668aa1SThomas Huth     bdrv_unref(bs_parents[1]);
1588da668aa1SThomas Huth     bdrv_unref(bs_parents[2]);
1589da668aa1SThomas Huth     bdrv_unref(bs_child);
1590da668aa1SThomas Huth }
1591da668aa1SThomas Huth 
1592da668aa1SThomas Huth 
1593da668aa1SThomas Huth typedef struct TestSimpleBlockJob {
1594da668aa1SThomas Huth     BlockJob common;
1595da668aa1SThomas Huth     bool should_complete;
1596da668aa1SThomas Huth     bool *did_complete;
1597da668aa1SThomas Huth } TestSimpleBlockJob;
1598da668aa1SThomas Huth 
1599da668aa1SThomas Huth static int coroutine_fn test_simple_job_run(Job *job, Error **errp)
1600da668aa1SThomas Huth {
1601da668aa1SThomas Huth     TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1602da668aa1SThomas Huth 
1603da668aa1SThomas Huth     while (!s->should_complete) {
1604da668aa1SThomas Huth         job_sleep_ns(job, 0);
1605da668aa1SThomas Huth     }
1606da668aa1SThomas Huth 
1607da668aa1SThomas Huth     return 0;
1608da668aa1SThomas Huth }
1609da668aa1SThomas Huth 
1610da668aa1SThomas Huth static void test_simple_job_clean(Job *job)
1611da668aa1SThomas Huth {
1612da668aa1SThomas Huth     TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1613da668aa1SThomas Huth     *s->did_complete = true;
1614da668aa1SThomas Huth }
1615da668aa1SThomas Huth 
1616da668aa1SThomas Huth static const BlockJobDriver test_simple_job_driver = {
1617da668aa1SThomas Huth     .job_driver = {
1618da668aa1SThomas Huth         .instance_size  = sizeof(TestSimpleBlockJob),
1619da668aa1SThomas Huth         .free           = block_job_free,
1620da668aa1SThomas Huth         .user_resume    = block_job_user_resume,
1621da668aa1SThomas Huth         .run            = test_simple_job_run,
1622da668aa1SThomas Huth         .clean          = test_simple_job_clean,
1623da668aa1SThomas Huth     },
1624da668aa1SThomas Huth };
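
/*
 * Illustrative sketch (not a registered test): driving a
 * TestSimpleBlockJob to completion without any drained section or
 * update_filename() callback involved, as a baseline for the polling
 * scenario exercised by test_drop_intermediate_poll() below.
 */
static void G_GNUC_UNUSED example_run_simple_job(BlockDriverState *bs)
{
    TestSimpleBlockJob *job;
    bool job_has_completed = false;

    job = block_job_create("example-job", &test_simple_job_driver, NULL, bs,
                           0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
    job->did_complete = &job_has_completed;

    job_start(&job->common.job);
    job->should_complete = true;

    /* Let the job's run(), job_exit() and clean() callbacks execute */
    while (!job_has_completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
}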
1625da668aa1SThomas Huth 
1626da668aa1SThomas Huth static int drop_intermediate_poll_update_filename(BdrvChild *child,
1627da668aa1SThomas Huth                                                   BlockDriverState *new_base,
1628da668aa1SThomas Huth                                                   const char *filename,
1629da668aa1SThomas Huth                                                   Error **errp)
1630da668aa1SThomas Huth {
1631da668aa1SThomas Huth     /*
1632da668aa1SThomas Huth      * We are free to poll here, which may change the block graph, if
1633da668aa1SThomas Huth      * it is not drained.
1634da668aa1SThomas Huth      */
1635da668aa1SThomas Huth 
1636da668aa1SThomas Huth     /* If the job is not drained: Complete it, schedule job_exit() */
1637da668aa1SThomas Huth     aio_poll(qemu_get_current_aio_context(), false);
1638da668aa1SThomas Huth     /* If the job is not drained: Run job_exit(), finish the job */
1639da668aa1SThomas Huth     aio_poll(qemu_get_current_aio_context(), false);
1640da668aa1SThomas Huth 
1641da668aa1SThomas Huth     return 0;
1642da668aa1SThomas Huth }
1643da668aa1SThomas Huth 
1644da668aa1SThomas Huth /**
1645da668aa1SThomas Huth  * Test a poll in the midst of bdrv_drop_intermediate().
1646da668aa1SThomas Huth  *
1647da668aa1SThomas Huth  * bdrv_drop_intermediate() calls BdrvChildClass.update_filename(),
1648da668aa1SThomas Huth  * which can yield or poll.  This may lead to graph changes, unless
1649da668aa1SThomas Huth  * the whole subtree in question is drained.
1650da668aa1SThomas Huth  *
1651da668aa1SThomas Huth  * We test this on the following graph:
1652da668aa1SThomas Huth  *
1653da668aa1SThomas Huth  *                    Job
1654da668aa1SThomas Huth  *
1655da668aa1SThomas Huth  *                     |
1656da668aa1SThomas Huth  *                  job-node
1657da668aa1SThomas Huth  *                     |
1658da668aa1SThomas Huth  *                     v
1659da668aa1SThomas Huth  *
1660da668aa1SThomas Huth  *                  job-node
1661da668aa1SThomas Huth  *
1662da668aa1SThomas Huth  *                     |
1663da668aa1SThomas Huth  *                  backing
1664da668aa1SThomas Huth  *                     |
1665da668aa1SThomas Huth  *                     v
1666da668aa1SThomas Huth  *
1667da668aa1SThomas Huth  * node-2 --chain--> node-1 --chain--> node-0
1668da668aa1SThomas Huth  *
1669da668aa1SThomas Huth  * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0).
1670da668aa1SThomas Huth  *
1671da668aa1SThomas Huth  * This first updates node-2's backing filename by invoking
1672da668aa1SThomas Huth  * drop_intermediate_poll_update_filename(), which polls twice.  This
1673da668aa1SThomas Huth  * causes the job to finish, which in turns causes the job-node to be
1674da668aa1SThomas Huth  * deleted.
1675da668aa1SThomas Huth  *
1676da668aa1SThomas Huth  * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it
1677da668aa1SThomas Huth  * already has a pointer to the BdrvChild edge between job-node and
1678da668aa1SThomas Huth  * node-1.  When it tries to handle that edge, we probably get a
1679da668aa1SThomas Huth  * segmentation fault because the object no longer exists.
1680da668aa1SThomas Huth  *
1681da668aa1SThomas Huth  *
1682da668aa1SThomas Huth  * The solution is for bdrv_drop_intermediate() to drain top's
1683da668aa1SThomas Huth  * subtree.  This prevents graph changes from happening just because
1684da668aa1SThomas Huth  * BdrvChildClass.update_filename() yields or polls.  Thus, the block
1685da668aa1SThomas Huth  * job is paused during that drained section and must finish before or
1686da668aa1SThomas Huth  * after.
1687da668aa1SThomas Huth  *
1688da668aa1SThomas Huth  * (In addition, bdrv_replace_child() must keep the job paused.)
1689da668aa1SThomas Huth  */
1690da668aa1SThomas Huth static void test_drop_intermediate_poll(void)
1691da668aa1SThomas Huth {
1692da668aa1SThomas Huth     static BdrvChildClass chain_child_class;
1693da668aa1SThomas Huth     BlockDriverState *chain[3];
1694da668aa1SThomas Huth     TestSimpleBlockJob *job;
1695da668aa1SThomas Huth     BlockDriverState *job_node;
1696da668aa1SThomas Huth     bool job_has_completed = false;
1697da668aa1SThomas Huth     int i;
1698da668aa1SThomas Huth     int ret;
1699da668aa1SThomas Huth 
1700da668aa1SThomas Huth     chain_child_class = child_of_bds;
1701da668aa1SThomas Huth     chain_child_class.update_filename = drop_intermediate_poll_update_filename;
1702da668aa1SThomas Huth 
1703da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1704da668aa1SThomas Huth         char name[32];
1705da668aa1SThomas Huth         snprintf(name, 32, "node-%i", i);
1706da668aa1SThomas Huth 
1707da668aa1SThomas Huth         chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort);
1708da668aa1SThomas Huth     }
1709da668aa1SThomas Huth 
1710da668aa1SThomas Huth     job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR,
1711da668aa1SThomas Huth                                     &error_abort);
1712da668aa1SThomas Huth     bdrv_set_backing_hd(job_node, chain[1], &error_abort);
1713da668aa1SThomas Huth 
1714da668aa1SThomas Huth     /*
1715da668aa1SThomas Huth      * Establish the chain last, so the chain links are the first
1716da668aa1SThomas Huth      * elements in the BDS.parents lists
1717da668aa1SThomas Huth      */
1718afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1719da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1720da668aa1SThomas Huth         if (i) {
1721da668aa1SThomas Huth             /* Takes the reference to chain[i - 1] */
17225bb04747SVladimir Sementsov-Ogievskiy             bdrv_attach_child(chain[i], chain[i - 1], "chain",
17235bb04747SVladimir Sementsov-Ogievskiy                               &chain_child_class, BDRV_CHILD_COW, &error_abort);
1724da668aa1SThomas Huth         }
1725da668aa1SThomas Huth     }
1726afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1727da668aa1SThomas Huth 
1728da668aa1SThomas Huth     job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
1729da668aa1SThomas Huth                            0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
1730da668aa1SThomas Huth 
1731da668aa1SThomas Huth     /* The job has a reference now */
1732da668aa1SThomas Huth     bdrv_unref(job_node);
1733da668aa1SThomas Huth 
1734da668aa1SThomas Huth     job->did_complete = &job_has_completed;
1735da668aa1SThomas Huth 
1736da668aa1SThomas Huth     job_start(&job->common.job);
1737da668aa1SThomas Huth     job->should_complete = true;
1738da668aa1SThomas Huth 
1739da668aa1SThomas Huth     g_assert(!job_has_completed);
1740da668aa1SThomas Huth     ret = bdrv_drop_intermediate(chain[1], chain[0], NULL);
17415e8ac217SKevin Wolf     aio_poll(qemu_get_aio_context(), false);
1742da668aa1SThomas Huth     g_assert(ret == 0);
1743da668aa1SThomas Huth     g_assert(job_has_completed);
1744da668aa1SThomas Huth 
1745da668aa1SThomas Huth     bdrv_unref(chain[2]);
1746da668aa1SThomas Huth }
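
/*
 * Illustrative sketch (not a registered test): the same three-node chain
 * as above, but linked through the generic backing mechanism instead of
 * a custom BdrvChildClass, the way test_blockjob_commit_by_drained_end()
 * links its parent nodes.
 */
static void G_GNUC_UNUSED example_backing_chain(void)
{
    BlockDriverState *chain[3];
    int i;

    for (i = 0; i < 3; i++) {
        char name[32];
        snprintf(name, sizeof(name), "backing-node-%i", i);
        chain[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
                                        &error_abort);
    }

    /* bdrv_set_backing_hd() takes its own reference to the backing node */
    bdrv_set_backing_hd(chain[2], chain[1], &error_abort);
    bdrv_set_backing_hd(chain[1], chain[0], &error_abort);

    /* Dropping our references tears the whole chain down again */
    bdrv_unref(chain[0]);
    bdrv_unref(chain[1]);
    bdrv_unref(chain[2]);
}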
1747da668aa1SThomas Huth 
1748da668aa1SThomas Huth 
1749da668aa1SThomas Huth typedef struct BDRVReplaceTestState {
175023987471SKevin Wolf     bool setup_completed;
1751da668aa1SThomas Huth     bool was_drained;
1752da668aa1SThomas Huth     bool was_undrained;
1753da668aa1SThomas Huth     bool has_read;
1754da668aa1SThomas Huth 
1755da668aa1SThomas Huth     int drain_count;
1756da668aa1SThomas Huth 
1757da668aa1SThomas Huth     bool yield_before_read;
1758da668aa1SThomas Huth     Coroutine *io_co;
1759da668aa1SThomas Huth     Coroutine *drain_co;
1760da668aa1SThomas Huth } BDRVReplaceTestState;
1761da668aa1SThomas Huth 
1762da668aa1SThomas Huth static void bdrv_replace_test_close(BlockDriverState *bs)
1763da668aa1SThomas Huth {
1764da668aa1SThomas Huth }
1765da668aa1SThomas Huth 
1766da668aa1SThomas Huth /**
1767da668aa1SThomas Huth  * If @bs has a backing file:
1768da668aa1SThomas Huth  *   Yield if .yield_before_read is true (and wait for drain_begin to
1769da668aa1SThomas Huth  *   wake us up).
1770da668aa1SThomas Huth  *   Forward the read to bs->backing.  Set .has_read to true.
1771da668aa1SThomas Huth  *   If drain_begin has woken us, wake it in turn.
1772da668aa1SThomas Huth  *
1773da668aa1SThomas Huth  * Otherwise:
1774da668aa1SThomas Huth  *   Set .has_read to true and return success.
1775da668aa1SThomas Huth  */
1776b9b10c35SKevin Wolf static int coroutine_fn GRAPH_RDLOCK
1777b9b10c35SKevin Wolf bdrv_replace_test_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
1778b9b10c35SKevin Wolf                             QEMUIOVector *qiov, BdrvRequestFlags flags)
1779da668aa1SThomas Huth {
1780da668aa1SThomas Huth     BDRVReplaceTestState *s = bs->opaque;
1781da668aa1SThomas Huth 
1782da668aa1SThomas Huth     if (bs->backing) {
1783da668aa1SThomas Huth         int ret;
1784da668aa1SThomas Huth 
1785da668aa1SThomas Huth         g_assert(!s->drain_count);
1786da668aa1SThomas Huth 
1787da668aa1SThomas Huth         s->io_co = qemu_coroutine_self();
1788da668aa1SThomas Huth         if (s->yield_before_read) {
1789da668aa1SThomas Huth             s->yield_before_read = false;
1790da668aa1SThomas Huth             qemu_coroutine_yield();
1791da668aa1SThomas Huth         }
1792da668aa1SThomas Huth         s->io_co = NULL;
1793da668aa1SThomas Huth 
1794da668aa1SThomas Huth         ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0);
1795da668aa1SThomas Huth         s->has_read = true;
1796da668aa1SThomas Huth 
1797da668aa1SThomas Huth         /* Wake up drain_co if it runs */
1798da668aa1SThomas Huth         if (s->drain_co) {
1799da668aa1SThomas Huth             aio_co_wake(s->drain_co);
1800da668aa1SThomas Huth         }
1801da668aa1SThomas Huth 
1802da668aa1SThomas Huth         return ret;
1803da668aa1SThomas Huth     }
1804da668aa1SThomas Huth 
1805da668aa1SThomas Huth     s->has_read = true;
1806da668aa1SThomas Huth     return 0;
1807da668aa1SThomas Huth }
1808da668aa1SThomas Huth 
18097bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_drain_co(void *opaque)
18107bce1c29SKevin Wolf {
18117bce1c29SKevin Wolf     BlockDriverState *bs = opaque;
18127bce1c29SKevin Wolf     BDRVReplaceTestState *s = bs->opaque;
18137bce1c29SKevin Wolf 
18147bce1c29SKevin Wolf     /* Keep waking io_co up until it is done */
18157bce1c29SKevin Wolf     while (s->io_co) {
18167bce1c29SKevin Wolf         aio_co_wake(s->io_co);
18177bce1c29SKevin Wolf         s->io_co = NULL;
18187bce1c29SKevin Wolf         qemu_coroutine_yield();
18197bce1c29SKevin Wolf     }
18207bce1c29SKevin Wolf     s->drain_co = NULL;
18217bce1c29SKevin Wolf     bdrv_dec_in_flight(bs);
18227bce1c29SKevin Wolf }
18237bce1c29SKevin Wolf 
1824da668aa1SThomas Huth /**
1825da668aa1SThomas Huth  * If .drain_count is 0, wake up .io_co if there is one; and set
1826da668aa1SThomas Huth  * .was_drained.
1827da668aa1SThomas Huth  * Increment .drain_count.
1828da668aa1SThomas Huth  */
18295e8ac217SKevin Wolf static void bdrv_replace_test_drain_begin(BlockDriverState *bs)
1830da668aa1SThomas Huth {
1831da668aa1SThomas Huth     BDRVReplaceTestState *s = bs->opaque;
1832da668aa1SThomas Huth 
183323987471SKevin Wolf     if (!s->setup_completed) {
183423987471SKevin Wolf         return;
183523987471SKevin Wolf     }
183623987471SKevin Wolf 
1837da668aa1SThomas Huth     if (!s->drain_count) {
18387bce1c29SKevin Wolf         s->drain_co = qemu_coroutine_create(bdrv_replace_test_drain_co, bs);
18397bce1c29SKevin Wolf         bdrv_inc_in_flight(bs);
18407bce1c29SKevin Wolf         aio_co_enter(bdrv_get_aio_context(bs), s->drain_co);
1841da668aa1SThomas Huth         s->was_drained = true;
1842da668aa1SThomas Huth     }
1843da668aa1SThomas Huth     s->drain_count++;
1844da668aa1SThomas Huth }
1845da668aa1SThomas Huth 
18467bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_read_entry(void *opaque)
18477bce1c29SKevin Wolf {
18487bce1c29SKevin Wolf     BlockDriverState *bs = opaque;
18497bce1c29SKevin Wolf     char data;
18507bce1c29SKevin Wolf     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1);
18517bce1c29SKevin Wolf     int ret;
18527bce1c29SKevin Wolf 
18537bce1c29SKevin Wolf     /* Queue a read request post-drain */
1854b9b10c35SKevin Wolf     bdrv_graph_co_rdlock();
18557bce1c29SKevin Wolf     ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0);
1856b9b10c35SKevin Wolf     bdrv_graph_co_rdunlock();
1857b9b10c35SKevin Wolf 
18587bce1c29SKevin Wolf     g_assert(ret >= 0);
18597bce1c29SKevin Wolf     bdrv_dec_in_flight(bs);
18607bce1c29SKevin Wolf }
18617bce1c29SKevin Wolf 
1862da668aa1SThomas Huth /**
1863da668aa1SThomas Huth  * Reduce .drain_count, set .was_undrained once it reaches 0.
1864da668aa1SThomas Huth  * If .drain_count reaches 0 and the node has a backing file, issue a
1865da668aa1SThomas Huth  * read request.
1866da668aa1SThomas Huth  */
18675e8ac217SKevin Wolf static void bdrv_replace_test_drain_end(BlockDriverState *bs)
1868da668aa1SThomas Huth {
1869da668aa1SThomas Huth     BDRVReplaceTestState *s = bs->opaque;
1870da668aa1SThomas Huth 
187123987471SKevin Wolf     if (!s->setup_completed) {
187223987471SKevin Wolf         return;
187323987471SKevin Wolf     }
187423987471SKevin Wolf 
1875da668aa1SThomas Huth     g_assert(s->drain_count > 0);
1876da668aa1SThomas Huth     if (!--s->drain_count) {
1877da668aa1SThomas Huth         s->was_undrained = true;
1878da668aa1SThomas Huth 
1879da668aa1SThomas Huth         if (bs->backing) {
18807bce1c29SKevin Wolf             Coroutine *co = qemu_coroutine_create(bdrv_replace_test_read_entry,
18817bce1c29SKevin Wolf                                                   bs);
18827bce1c29SKevin Wolf             bdrv_inc_in_flight(bs);
18837bce1c29SKevin Wolf             aio_co_enter(bdrv_get_aio_context(bs), co);
1884da668aa1SThomas Huth         }
1885da668aa1SThomas Huth     }
1886da668aa1SThomas Huth }
1887da668aa1SThomas Huth 
1888da668aa1SThomas Huth static BlockDriver bdrv_replace_test = {
1889da668aa1SThomas Huth     .format_name            = "replace_test",
1890da668aa1SThomas Huth     .instance_size          = sizeof(BDRVReplaceTestState),
18919ebfc111SVladimir Sementsov-Ogievskiy     .supports_backing       = true,
1892da668aa1SThomas Huth 
1893da668aa1SThomas Huth     .bdrv_close             = bdrv_replace_test_close,
1894da668aa1SThomas Huth     .bdrv_co_preadv         = bdrv_replace_test_co_preadv,
1895da668aa1SThomas Huth 
18965e8ac217SKevin Wolf     .bdrv_drain_begin       = bdrv_replace_test_drain_begin,
18975e8ac217SKevin Wolf     .bdrv_drain_end         = bdrv_replace_test_drain_end,
1898da668aa1SThomas Huth 
1899da668aa1SThomas Huth     .bdrv_child_perm        = bdrv_default_perms,
1900da668aa1SThomas Huth };
1901da668aa1SThomas Huth 
1902da668aa1SThomas Huth static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque)
1903da668aa1SThomas Huth {
1904da668aa1SThomas Huth     int ret;
1905da668aa1SThomas Huth     char data;
1906da668aa1SThomas Huth 
1907da668aa1SThomas Huth     ret = blk_co_pread(opaque, 0, 1, &data, 0);
1908da668aa1SThomas Huth     g_assert(ret >= 0);
1909da668aa1SThomas Huth }
1910da668aa1SThomas Huth 
1911da668aa1SThomas Huth /**
1912da668aa1SThomas Huth  * We test two things:
1913da668aa1SThomas Huth  * (1) bdrv_replace_child_noperm() must not undrain the parent if both
1914da668aa1SThomas Huth  *     children are drained.
1915da668aa1SThomas Huth  * (2) bdrv_replace_child_noperm() must never flush I/O requests to a
1916da668aa1SThomas Huth  *     drained child.  If the old child is drained, it must flush I/O
1917da668aa1SThomas Huth  *     requests after the new one has been attached.  If the new child
1918da668aa1SThomas Huth  *     is drained, it must flush I/O requests before the old one is
1919da668aa1SThomas Huth  *     detached.
1920da668aa1SThomas Huth  *
1921da668aa1SThomas Huth  * To do so, we create one parent node and two child nodes; then
1922da668aa1SThomas Huth  * attach one of the children (old_child_bs) to the parent, then
1923da668aa1SThomas Huth  * drain both old_child_bs and new_child_bs according to
1924da668aa1SThomas Huth  * old_drain_count and new_drain_count, respectively, and finally
1925da668aa1SThomas Huth  * we invoke bdrv_replace_node() to replace old_child_bs by
1926da668aa1SThomas Huth  * new_child_bs.
1927da668aa1SThomas Huth  *
1928da668aa1SThomas Huth  * The test block driver we use here (bdrv_replace_test) has a read
1929da668aa1SThomas Huth  * function that:
1930da668aa1SThomas Huth  * - For the parent node, can optionally yield, and then forwards the
1931da668aa1SThomas Huth  *   read to bdrv_co_preadv(),
1932da668aa1SThomas Huth  * - For the child node, just returns immediately.
1933da668aa1SThomas Huth  *
1934da668aa1SThomas Huth  * If the read yields, the drain_begin function will wake it up.
1935da668aa1SThomas Huth  *
1936da668aa1SThomas Huth  * The drain_end function issues a read on the parent once it is fully
1937da668aa1SThomas Huth  * undrained (which simulates requests starting to come in again).
1938da668aa1SThomas Huth  */
1939da668aa1SThomas Huth static void do_test_replace_child_mid_drain(int old_drain_count,
1940da668aa1SThomas Huth                                             int new_drain_count)
1941da668aa1SThomas Huth {
1942da668aa1SThomas Huth     BlockBackend *parent_blk;
1943da668aa1SThomas Huth     BlockDriverState *parent_bs;
1944da668aa1SThomas Huth     BlockDriverState *old_child_bs, *new_child_bs;
1945da668aa1SThomas Huth     BDRVReplaceTestState *parent_s;
1946da668aa1SThomas Huth     BDRVReplaceTestState *old_child_s, *new_child_s;
1947da668aa1SThomas Huth     Coroutine *io_co;
1948da668aa1SThomas Huth     int i;
1949da668aa1SThomas Huth 
1950da668aa1SThomas Huth     parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0,
1951da668aa1SThomas Huth                                      &error_abort);
1952da668aa1SThomas Huth     parent_s = parent_bs->opaque;
1953da668aa1SThomas Huth 
1954da668aa1SThomas Huth     parent_blk = blk_new(qemu_get_aio_context(),
1955da668aa1SThomas Huth                          BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
1956da668aa1SThomas Huth     blk_insert_bs(parent_blk, parent_bs, &error_abort);
1957da668aa1SThomas Huth 
1958da668aa1SThomas Huth     old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0,
1959da668aa1SThomas Huth                                         &error_abort);
1960da668aa1SThomas Huth     new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0,
1961da668aa1SThomas Huth                                         &error_abort);
1962da668aa1SThomas Huth     old_child_s = old_child_bs->opaque;
1963da668aa1SThomas Huth     new_child_s = new_child_bs->opaque;
1964da668aa1SThomas Huth 
1965da668aa1SThomas Huth     /* So that we can read something */
1966da668aa1SThomas Huth     parent_bs->total_sectors = 1;
1967da668aa1SThomas Huth     old_child_bs->total_sectors = 1;
1968da668aa1SThomas Huth     new_child_bs->total_sectors = 1;
1969da668aa1SThomas Huth 
1970da668aa1SThomas Huth     bdrv_ref(old_child_bs);
1971afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
19725bb04747SVladimir Sementsov-Ogievskiy     bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
19735bb04747SVladimir Sementsov-Ogievskiy                       BDRV_CHILD_COW, &error_abort);
1974afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
197523987471SKevin Wolf     parent_s->setup_completed = true;
1976da668aa1SThomas Huth 
1977da668aa1SThomas Huth     for (i = 0; i < old_drain_count; i++) {
1978da668aa1SThomas Huth         bdrv_drained_begin(old_child_bs);
1979da668aa1SThomas Huth     }
1980da668aa1SThomas Huth     for (i = 0; i < new_drain_count; i++) {
1981da668aa1SThomas Huth         bdrv_drained_begin(new_child_bs);
1982da668aa1SThomas Huth     }
1983da668aa1SThomas Huth 
1984da668aa1SThomas Huth     if (!old_drain_count) {
1985da668aa1SThomas Huth         /*
1986da668aa1SThomas Huth          * Start a read operation that will yield, so it will not
1987da668aa1SThomas Huth          * complete before the node is drained.
1988da668aa1SThomas Huth          */
1989da668aa1SThomas Huth         parent_s->yield_before_read = true;
1990da668aa1SThomas Huth         io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co,
1991da668aa1SThomas Huth                                       parent_blk);
1992da668aa1SThomas Huth         qemu_coroutine_enter(io_co);
1993da668aa1SThomas Huth     }
1994da668aa1SThomas Huth 
1995da668aa1SThomas Huth     /* If we have started a read operation, it should have yielded */
1996da668aa1SThomas Huth     g_assert(!parent_s->has_read);
1997da668aa1SThomas Huth 
1998da668aa1SThomas Huth     /* Reset drained status so we can see what bdrv_replace_node() does */
1999da668aa1SThomas Huth     parent_s->was_drained = false;
2000da668aa1SThomas Huth     parent_s->was_undrained = false;
2001da668aa1SThomas Huth 
2002da668aa1SThomas Huth     g_assert(parent_bs->quiesce_counter == old_drain_count);
2003da668aa1SThomas Huth     bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
2004da668aa1SThomas Huth     g_assert(parent_bs->quiesce_counter == new_drain_count);
2005da668aa1SThomas Huth 
2006da668aa1SThomas Huth     if (!old_drain_count && !new_drain_count) {
2007da668aa1SThomas Huth         /*
2008da668aa1SThomas Huth          * From undrained to undrained drains and undrains the parent,
2009da668aa1SThomas Huth          * because bdrv_replace_node() contains a drained section for
2010da668aa1SThomas Huth          * @old_child_bs.
2011da668aa1SThomas Huth          */
2012da668aa1SThomas Huth         g_assert(parent_s->was_drained && parent_s->was_undrained);
2013da668aa1SThomas Huth     } else if (!old_drain_count && new_drain_count) {
2014da668aa1SThomas Huth         /*
2015da668aa1SThomas Huth          * From undrained to drained should drain the parent and keep
2016da668aa1SThomas Huth          * it that way.
2017da668aa1SThomas Huth          */
2018da668aa1SThomas Huth         g_assert(parent_s->was_drained && !parent_s->was_undrained);
2019da668aa1SThomas Huth     } else if (old_drain_count && !new_drain_count) {
2020da668aa1SThomas Huth         /*
2021da668aa1SThomas Huth          * From drained to undrained should undrain the parent and
2022da668aa1SThomas Huth          * keep it that way.
2023da668aa1SThomas Huth          */
2024da668aa1SThomas Huth         g_assert(!parent_s->was_drained && parent_s->was_undrained);
2025da668aa1SThomas Huth     } else /* if (old_drain_count && new_drain_count) */ {
2026da668aa1SThomas Huth         /*
2027da668aa1SThomas Huth          * From drained to drained must not undrain the parent at any
2028da668aa1SThomas Huth          * point
2029da668aa1SThomas Huth          */
2030da668aa1SThomas Huth         g_assert(!parent_s->was_drained && !parent_s->was_undrained);
2031da668aa1SThomas Huth     }
2032da668aa1SThomas Huth 
2033da668aa1SThomas Huth     if (!old_drain_count || !new_drain_count) {
2034da668aa1SThomas Huth         /*
2035da668aa1SThomas Huth          * If !old_drain_count, we have started a read request before
2036da668aa1SThomas Huth          * bdrv_replace_node().  If !new_drain_count, the parent must
2037da668aa1SThomas Huth          * have been undrained at some point, and
2038da668aa1SThomas Huth          * bdrv_replace_test_drain_end() starts a read request
2039da668aa1SThomas Huth          * then.
2040da668aa1SThomas Huth          */
2041da668aa1SThomas Huth         g_assert(parent_s->has_read);
2042da668aa1SThomas Huth     } else {
2043da668aa1SThomas Huth         /*
2044da668aa1SThomas Huth          * If the parent was never undrained, there is no way to start
2045da668aa1SThomas Huth          * a read request.
2046da668aa1SThomas Huth          */
2047da668aa1SThomas Huth         g_assert(!parent_s->has_read);
2048da668aa1SThomas Huth     }
2049da668aa1SThomas Huth 
2050da668aa1SThomas Huth     /* A drained child must not have received any request */
2051da668aa1SThomas Huth     g_assert(!(old_drain_count && old_child_s->has_read));
2052da668aa1SThomas Huth     g_assert(!(new_drain_count && new_child_s->has_read));
2053da668aa1SThomas Huth 
2054da668aa1SThomas Huth     for (i = 0; i < new_drain_count; i++) {
2055da668aa1SThomas Huth         bdrv_drained_end(new_child_bs);
2056da668aa1SThomas Huth     }
2057da668aa1SThomas Huth     for (i = 0; i < old_drain_count; i++) {
2058da668aa1SThomas Huth         bdrv_drained_end(old_child_bs);
2059da668aa1SThomas Huth     }
2060da668aa1SThomas Huth 
2061da668aa1SThomas Huth     /*
2062da668aa1SThomas Huth      * By now, bdrv_replace_test_co_drain_end() must have been called
2063da668aa1SThomas Huth      * at some point while the new child was attached to the parent.
2064da668aa1SThomas Huth      */
2065da668aa1SThomas Huth     g_assert(parent_s->has_read);
2066da668aa1SThomas Huth     g_assert(new_child_s->has_read);
2067da668aa1SThomas Huth 
2068da668aa1SThomas Huth     blk_unref(parent_blk);
2069da668aa1SThomas Huth     bdrv_unref(parent_bs);
2070da668aa1SThomas Huth     bdrv_unref(old_child_bs);
2071da668aa1SThomas Huth     bdrv_unref(new_child_bs);
2072da668aa1SThomas Huth }
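
/*
 * A minimal sketch of the drained-section pattern exercised above; the
 * helper name is hypothetical and the function is not used by any test,
 * while bdrv_drained_begin()/bdrv_drained_end() are the real APIs called
 * elsewhere in this file.  bdrv_replace_node() applies the same pattern
 * internally to the node being replaced, which is why the undrained ->
 * undrained case observes both a drain and an undrain on the parent.
 */
static void G_GNUC_UNUSED example_drained_section(BlockDriverState *bs)
{
    /*
     * Begin a drained section: parents of @bs are quiesced and all
     * in-flight requests have completed by the time this returns.
     */
    bdrv_drained_begin(bs);

    /* ... the graph around @bs may safely be modified here ... */

    /*
     * End the drained section: parents may resume issuing requests,
     * unless another drain still keeps them quiescent.
     */
    bdrv_drained_end(bs);
}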
2073da668aa1SThomas Huth 
2074da668aa1SThomas Huth static void test_replace_child_mid_drain(void)
2075da668aa1SThomas Huth {
2076da668aa1SThomas Huth     int old_drain_count, new_drain_count;
2077da668aa1SThomas Huth 
2078da668aa1SThomas Huth     for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) {
2079da668aa1SThomas Huth         for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) {
2080da668aa1SThomas Huth             do_test_replace_child_mid_drain(old_drain_count, new_drain_count);
2081da668aa1SThomas Huth         }
2082da668aa1SThomas Huth     }
2083da668aa1SThomas Huth }
2084da668aa1SThomas Huth 
2085da668aa1SThomas Huth int main(int argc, char **argv)
2086da668aa1SThomas Huth {
2087da668aa1SThomas Huth     int ret;
2088da668aa1SThomas Huth 
2089da668aa1SThomas Huth     bdrv_init();
2090da668aa1SThomas Huth     qemu_init_main_loop(&error_abort);
2091da668aa1SThomas Huth 
2092da668aa1SThomas Huth     g_test_init(&argc, &argv, NULL);
2093da668aa1SThomas Huth     qemu_event_init(&done_event, false);
2094da668aa1SThomas Huth 
2095da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
2096da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
2097da668aa1SThomas Huth 
2098da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
2099da668aa1SThomas Huth                     test_drv_cb_co_drain_all);
2100da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
2101da668aa1SThomas Huth 
2102da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
2103da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
2104da668aa1SThomas Huth 
2105da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
2106da668aa1SThomas Huth                     test_quiesce_co_drain_all);
2107da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
2108da668aa1SThomas Huth 
2109da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/nested", test_nested);
2110da668aa1SThomas Huth 
2111da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/graph-change/drain_all",
2112da668aa1SThomas Huth                     test_graph_change_drain_all);
2113da668aa1SThomas Huth 
2114da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
2115da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
2116da668aa1SThomas Huth 
2117da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
2118da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
2119da668aa1SThomas Huth 
2120da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
2121da668aa1SThomas Huth                     test_blockjob_error_drain_all);
2122da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/error/drain",
2123da668aa1SThomas Huth                     test_blockjob_error_drain);
2124da668aa1SThomas Huth 
2125da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
2126da668aa1SThomas Huth                     test_blockjob_iothread_drain_all);
2127da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
2128da668aa1SThomas Huth                     test_blockjob_iothread_drain);
2129da668aa1SThomas Huth 
2130da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
2131da668aa1SThomas Huth                     test_blockjob_iothread_error_drain_all);
2132da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
2133da668aa1SThomas Huth                     test_blockjob_iothread_error_drain);
2134da668aa1SThomas Huth 
2135da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
2136da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
2137da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
2138da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
2139da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);
2140da668aa1SThomas Huth 
2141da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained);
2142da668aa1SThomas Huth 
2143da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context);
2144da668aa1SThomas Huth 
2145da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end",
2146da668aa1SThomas Huth                     test_blockjob_commit_by_drained_end);
2147da668aa1SThomas Huth 
2148da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll",
2149da668aa1SThomas Huth                     test_drop_intermediate_poll);
2150da668aa1SThomas Huth 
2151da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/replace_child/mid-drain",
2152da668aa1SThomas Huth                     test_replace_child_mid_drain);
2153da668aa1SThomas Huth 
2154da668aa1SThomas Huth     ret = g_test_run();
2155da668aa1SThomas Huth     qemu_event_destroy(&done_event);
2156da668aa1SThomas Huth     return ret;
2157da668aa1SThomas Huth }
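
/*
 * Usage note (paths are an assumption about the usual build layout, not
 * taken from this file): the binary is normally built under tests/unit/
 * in the QEMU build tree.  Since the tests are registered with GLib's
 * g_test framework, a single case can be selected with the standard -p
 * option, and -l lists all registered test paths, e.g.:
 *
 *     ./tests/unit/test-bdrv-drain -l
 *     ./tests/unit/test-bdrv-drain -p /bdrv-drain/replace_child/mid-drain
 */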