1da668aa1SThomas Huth /*
2da668aa1SThomas Huth * Block node draining tests
3da668aa1SThomas Huth *
4da668aa1SThomas Huth * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
5da668aa1SThomas Huth *
6da668aa1SThomas Huth * Permission is hereby granted, free of charge, to any person obtaining a copy
7da668aa1SThomas Huth * of this software and associated documentation files (the "Software"), to deal
8da668aa1SThomas Huth * in the Software without restriction, including without limitation the rights
9da668aa1SThomas Huth * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10da668aa1SThomas Huth * copies of the Software, and to permit persons to whom the Software is
11da668aa1SThomas Huth * furnished to do so, subject to the following conditions:
12da668aa1SThomas Huth *
13da668aa1SThomas Huth * The above copyright notice and this permission notice shall be included in
14da668aa1SThomas Huth * all copies or substantial portions of the Software.
15da668aa1SThomas Huth *
16da668aa1SThomas Huth * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17da668aa1SThomas Huth * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18da668aa1SThomas Huth * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19da668aa1SThomas Huth * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20da668aa1SThomas Huth * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21da668aa1SThomas Huth * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22da668aa1SThomas Huth * THE SOFTWARE.
23da668aa1SThomas Huth */
24da668aa1SThomas Huth
25da668aa1SThomas Huth #include "qemu/osdep.h"
26e2c1c34fSMarkus Armbruster #include "block/block_int.h"
27da668aa1SThomas Huth #include "block/blockjob_int.h"
28da668aa1SThomas Huth #include "sysemu/block-backend.h"
29da668aa1SThomas Huth #include "qapi/error.h"
30da668aa1SThomas Huth #include "qemu/main-loop.h"
31da668aa1SThomas Huth #include "iothread.h"
32da668aa1SThomas Huth
/* Signalled by the AIO completion callbacks to wake the main thread */
static QemuEvent done_event;

/* Per-node instance state of the "test" block driver defined below */
typedef struct BDRVTestState {
    int drain_count;                /* ++ in drain_begin, -- in drain_end */
    AioContext *bh_indirection_ctx; /* if set, preadv re-enters via a BH here */
    bool sleep_in_drain_begin;      /* make drain_begin spawn a busy coroutine */
} BDRVTestState;
40da668aa1SThomas Huth
/*
 * Coroutine started by bdrv_test_drain_begin(): keeps the node busy for a
 * short while, then drops the in-flight reference the caller took, so that
 * the drain has something to wait for.
 */
static void coroutine_fn sleep_in_drain_begin(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
    bdrv_dec_in_flight(bs); /* matches bdrv_inc_in_flight() in the caller */
}
487bce1c29SKevin Wolf
/*
 * .bdrv_drain_begin callback: count the drain section.  If requested via
 * sleep_in_drain_begin, also keep the node in-flight for a while by kicking
 * off the sleep_in_drain_begin() coroutine.
 */
static void bdrv_test_drain_begin(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count++;
    if (s->sleep_in_drain_begin) {
        Coroutine *co = qemu_coroutine_create(sleep_in_drain_begin, bs);
        /* Dropped again by sleep_in_drain_begin() */
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), co);
    }
}
59da668aa1SThomas Huth
/* .bdrv_drain_end callback: undo the counter increment from drain_begin */
static void bdrv_test_drain_end(BlockDriverState *bs)
{
    BDRVTestState *state = bs->opaque;

    state->drain_count--;
}
65da668aa1SThomas Huth
/* .bdrv_close callback: the tests only delete nodes while they are drained */
static void bdrv_test_close(BlockDriverState *bs)
{
    BDRVTestState *state = bs->opaque;

    g_assert_cmpint(state->drain_count, >, 0);
}
71da668aa1SThomas Huth
co_reenter_bh(void * opaque)72da668aa1SThomas Huth static void co_reenter_bh(void *opaque)
73da668aa1SThomas Huth {
74da668aa1SThomas Huth aio_co_wake(opaque);
75da668aa1SThomas Huth }
76da668aa1SThomas Huth
/*
 * .bdrv_co_preadv callback: always succeeds with 0 after a short delay.
 * If bh_indirection_ctx is set, the coroutine additionally yields and is
 * re-entered from a BH scheduled in that context (see co_reenter_bh()).
 */
static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    BDRVTestState *s = bs->opaque;

    /* We want this request to stay until the polling loop in drain waits for
     * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
     * first and polls its result, too, but it shouldn't accidentally complete
     * this request yet. */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);

    if (s->bh_indirection_ctx) {
        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return 0;
}
98da668aa1SThomas Huth
bdrv_test_co_change_backing_file(BlockDriverState * bs,const char * backing_file,const char * backing_fmt)99e2dd2737SKevin Wolf static int bdrv_test_co_change_backing_file(BlockDriverState *bs,
100da668aa1SThomas Huth const char *backing_file,
101da668aa1SThomas Huth const char *backing_fmt)
102da668aa1SThomas Huth {
103da668aa1SThomas Huth return 0;
104da668aa1SThomas Huth }
105da668aa1SThomas Huth
/* Minimal "test" format driver wiring together the callbacks above */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = sizeof(BDRVTestState),
    .supports_backing       = true,

    .bdrv_close             = bdrv_test_close,
    .bdrv_co_preadv         = bdrv_test_co_preadv,

    .bdrv_drain_begin       = bdrv_test_drain_begin,
    .bdrv_drain_end         = bdrv_test_drain_end,

    /* Default permission logic is sufficient for the test graph */
    .bdrv_child_perm        = bdrv_default_perms,

    .bdrv_co_change_backing_file = bdrv_test_co_change_backing_file,
};
121da668aa1SThomas Huth
/* AIO completion callback: store @ret into the int pointed to by @opaque */
static void aio_ret_cb(void *opaque, int ret)
{
    *(int *)opaque = ret;
}
127da668aa1SThomas Huth
/* Arguments shared with call_in_coroutine_entry() */
typedef struct CallInCoroutineData {
    void (*entry)(void);    /* test function to run in coroutine context */
    bool done;              /* set by the coroutine after entry() returned */
} CallInCoroutineData;
132da668aa1SThomas Huth
call_in_coroutine_entry(void * opaque)133da668aa1SThomas Huth static coroutine_fn void call_in_coroutine_entry(void *opaque)
134da668aa1SThomas Huth {
135da668aa1SThomas Huth CallInCoroutineData *data = opaque;
136da668aa1SThomas Huth
137da668aa1SThomas Huth data->entry();
138da668aa1SThomas Huth data->done = true;
139da668aa1SThomas Huth }
140da668aa1SThomas Huth
call_in_coroutine(void (* entry)(void))141da668aa1SThomas Huth static void call_in_coroutine(void (*entry)(void))
142da668aa1SThomas Huth {
143da668aa1SThomas Huth Coroutine *co;
144da668aa1SThomas Huth CallInCoroutineData data = {
145da668aa1SThomas Huth .entry = entry,
146da668aa1SThomas Huth .done = false,
147da668aa1SThomas Huth };
148da668aa1SThomas Huth
149da668aa1SThomas Huth co = qemu_coroutine_create(call_in_coroutine_entry, &data);
150da668aa1SThomas Huth qemu_coroutine_enter(co);
151da668aa1SThomas Huth while (!data.done) {
152da668aa1SThomas Huth aio_poll(qemu_get_aio_context(), true);
153da668aa1SThomas Huth }
154da668aa1SThomas Huth }
155da668aa1SThomas Huth
/* Which drain API a test should exercise */
enum drain_type {
    BDRV_DRAIN_ALL,     /* bdrv_drain_all_begin/end */
    BDRV_DRAIN,         /* bdrv_drained_begin/end on a single node */
    DRAIN_TYPE_MAX,     /* sentinel for iterating over all drain types */
};
161da668aa1SThomas Huth
/* Begin a drained section of the requested type on @bs */
static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type == BDRV_DRAIN_ALL) {
        bdrv_drain_all_begin();
    } else if (drain_type == BDRV_DRAIN) {
        bdrv_drained_begin(bs);
    } else {
        g_assert_not_reached();
    }
}
170da668aa1SThomas Huth
/* End a drained section previously started with do_drain_begin() */
static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type == BDRV_DRAIN_ALL) {
        bdrv_drain_all_end();
    } else if (drain_type == BDRV_DRAIN) {
        bdrv_drained_end(bs);
    } else {
        g_assert_not_reached();
    }
}
179da668aa1SThomas Huth
/*
 * Thin forwarder to do_drain_begin().  NOTE(review): the _unlocked suffix
 * appears historical — no lock is taken anywhere around these calls in this
 * file; confirm against the callers before renaming.
 */
static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    do_drain_begin(drain_type, bs);
}
184da668aa1SThomas Huth
/*
 * Create a BlockBackend with a "test" node ("test-node") that in turn has
 * another "test" node ("backing") attached as its backing file.  The
 * returned BlockBackend (to be released with blk_unref()) keeps the whole
 * graph alive.
 */
static BlockBackend * no_coroutine_fn test_setup(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    bdrv_set_backing_hd(bs, backing, &error_abort);

    /* Drop our own references; blk and the backing link keep the nodes */
    bdrv_unref(backing);
    bdrv_unref(bs);

    return blk;
}
20357f3d07bSKevin Wolf
/*
 * Thin forwarder to do_drain_end(); counterpart of do_drain_begin_unlocked().
 * NOTE(review): the _unlocked suffix appears historical — no lock is taken
 * in this file; confirm against the callers.
 */
static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    do_drain_end(drain_type, bs);
}
208da668aa1SThomas Huth
/*
 * Check that the driver's drain_begin/drain_end callbacks fire exactly once
 * per drained section, both with and without an in-flight request, for the
 * node itself and (when @recursive) for its backing node.
 *
 * Locking the block graph would be a bit cumbersome here because this function
 * is called both in coroutine and non-coroutine context. We know this is a test
 * and nothing else is running, so don't bother with TSA.
 */
static void coroutine_mixed_fn TSA_NO_TSA
test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
                   bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;
    BDRVTestState *s, *backing_s;
    BlockAIOCB *acb;
    int aio_ret;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    s = bs->opaque;
    backing_s = backing->opaque;

    /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    /* Callers pass recursive=true exactly for BDRV_DRAIN_ALL */
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* Now do the same while a request is pending */
    aio_ret = -EINPROGRESS;
    acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    /* Draining must have waited for the in-flight read to complete */
    g_assert_cmpint(aio_ret, ==, 0);
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);
}
263da668aa1SThomas Huth
/* Driver callbacks with bdrv_drain_all from the main loop (recursive) */
static void test_drv_cb_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}
270da668aa1SThomas Huth
/* Driver callbacks with single-node bdrv_drain from the main loop */
static void test_drv_cb_drain(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}
27757f3d07bSKevin Wolf
/* Coroutine entry: fetch the BlockBackend created by the caller's
 * test_setup() (the only one in existence) and drain_all from coroutine
 * context */
static void coroutine_fn test_drv_cb_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
}
283da668aa1SThomas Huth
/* Driver callbacks with bdrv_drain_all issued from coroutine context */
static void test_drv_cb_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_all_entry);
    blk_unref(blk);
}
29057f3d07bSKevin Wolf
/* Coroutine entry: single-node drain on the BlockBackend from test_setup() */
static void coroutine_fn test_drv_cb_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN, false);
}
296da668aa1SThomas Huth
/* Driver callbacks with single-node bdrv_drain issued from coroutine context */
static void test_drv_cb_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_entry);
    blk_unref(blk);
}
303da668aa1SThomas Huth
/*
 * Check the quiesce_counter bookkeeping on the node and its backing node
 * around a single drained section.
 *
 * Locking the block graph would be a bit cumbersome here because this function
 * is called both in coroutine and non-coroutine context. We know this is a test
 * and nothing else is running, so don't bother with TSA.
 */
static void coroutine_mixed_fn TSA_NO_TSA
test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
                    bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    do_drain_begin(drain_type, bs);

    if (drain_type == BDRV_DRAIN_ALL) {
        /* NOTE(review): drain_all quiesces bs twice — presumably once
         * directly and once through its role in the graph; confirm against
         * bdrv_drain_all_begin() */
        g_assert_cmpint(bs->quiesce_counter, ==, 2);
    } else {
        g_assert_cmpint(bs->quiesce_counter, ==, 1);
    }
    g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
}
333da668aa1SThomas Huth
/* Quiesce counters with bdrv_drain_all from the main loop */
static void test_quiesce_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}
340da668aa1SThomas Huth
/* Quiesce counters with single-node bdrv_drain from the main loop */
static void test_quiesce_drain(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}
34757f3d07bSKevin Wolf
/* Coroutine entry: quiesce-counter check with drain_all; the BlockBackend
 * was created by the caller's test_setup() */
static void coroutine_fn test_quiesce_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
}
353da668aa1SThomas Huth
/* Quiesce counters with bdrv_drain_all issued from coroutine context */
static void test_quiesce_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_all_entry);
    blk_unref(blk);
}
36057f3d07bSKevin Wolf
/* Coroutine entry: quiesce-counter check with single-node drain */
static void coroutine_fn test_quiesce_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN, false);
}
366da668aa1SThomas Huth
/* Quiesce counters with single-node bdrv_drain issued from coroutine context */
static void test_quiesce_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_entry);
    blk_unref(blk);
}
373da668aa1SThomas Huth
/*
 * Nest every combination of the two drain types on the same node and verify
 * the quiesce/drain counters at each stage: drained sections must stack, and
 * ending the inner section must not end the outer one.
 */
static void test_nested(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    enum drain_type outer, inner;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
        for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
            /* The backing node is quiesced once per BDRV_DRAIN_ALL section */
            int backing_quiesce = (outer == BDRV_DRAIN_ALL) +
                                  (inner == BDRV_DRAIN_ALL);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);

            do_drain_begin(outer, bs);
            do_drain_begin(inner, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 2 + !!backing_quiesce);
            g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
            /* The driver callback only fires on the first quiesce */
            g_assert_cmpint(s->drain_count, ==, 1);
            g_assert_cmpint(backing_s->drain_count, ==, !!backing_quiesce);

            do_drain_end(inner, bs);
            do_drain_end(outer, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);
        }
    }

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}
423da668aa1SThomas Huth
/*
 * Check that nodes created or deleted inside a bdrv_drain_all section are
 * handled correctly: a new node starts out drained, and deleting a drained
 * node leaves the remaining nodes' counters untouched.
 */
static void test_graph_change_drain_all(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b;
    BDRVTestState *a_s, *b_s;

    /* Create node A with a BlockBackend */
    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);

    /* Call bdrv_drain_all_begin() */
    bdrv_drain_all_begin();

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);

    /* Create node B with a BlockBackend */
    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    /* The new node must be drained like the pre-existing one */
    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Unref and finally delete node A */
    blk_unref(blk_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    bdrv_unref(bs_a);

    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* End the drained section */
    bdrv_drain_all_end();

    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);

    bdrv_unref(bs_b);
    blk_unref(blk_b);
}
480da668aa1SThomas Huth
/* Shared between test_iothread_common() and its BH/coroutine helpers */
struct test_iothread_data {
    BlockDriverState *bs;       /* node under test */
    enum drain_type drain_type; /* which drain API to exercise */
    int *aio_ret;               /* result slot of the blk_aio_preadv() request */
    bool co_done;               /* set when test_iothread_drain_co_entry() is done */
};
487da668aa1SThomas Huth
/*
 * Coroutine entered in the iothread's AioContext by test_iothread_common():
 * perform the drained section there, then flag completion for the main
 * thread's AIO_WAIT_WHILE_UNLOCKED().
 */
static void coroutine_fn test_iothread_drain_co_entry(void *opaque)
{
    struct test_iothread_data *data = opaque;

    do_drain_begin(data->drain_type, data->bs);
    /* drain must have waited for the pending read; aio_ret_cb() already ran */
    g_assert_cmpint(*data->aio_ret, ==, 0);
    do_drain_end(data->drain_type, data->bs);

    data->co_done = true;
    aio_wait_kick();
}
499da668aa1SThomas Huth
test_iothread_aio_cb(void * opaque,int ret)500da668aa1SThomas Huth static void test_iothread_aio_cb(void *opaque, int ret)
501da668aa1SThomas Huth {
502da668aa1SThomas Huth int *aio_ret = opaque;
503da668aa1SThomas Huth *aio_ret = ret;
504da668aa1SThomas Huth qemu_event_set(&done_event);
505da668aa1SThomas Huth }
506da668aa1SThomas Huth
/*
 * BH scheduled in IOThread a's context by test_iothread_common(): flush the
 * node and drop the in-flight reference the scheduler took.
 */
static void test_iothread_main_thread_bh(void *opaque)
{
    struct test_iothread_data *data = opaque;

    bdrv_flush(data->bs);
    bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
}
514da668aa1SThomas Huth
/*
 * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
 * The request involves a BH on iothread 2 before it can complete.
 *
 * @drain_thread = 0 means that do_drain_begin/end are called from the main
 * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
 * for this BDS cannot be called from iothread 2 because only the main thread
 * may do cross-AioContext polling.
 */
static void test_iothread_common(enum drain_type drain_type, int drain_thread)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    BDRVTestState *s;
    BlockAIOCB *acb;
    Coroutine *co;
    int aio_ret;
    struct test_iothread_data data;

    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    /* bdrv_drain_all() may only be called from the main loop thread */
    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
        goto out;
    }

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);
    blk_set_disable_request_queuing(blk, true);

    blk_set_aio_context(blk, ctx_a, &error_abort);

    /* Let bdrv_test_co_preadv() take a detour through a BH in iothread b */
    s->bh_indirection_ctx = ctx_b;

    aio_ret = -EINPROGRESS;
    qemu_event_reset(&done_event);

    if (drain_thread == 0) {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
    } else {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    }
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    data = (struct test_iothread_data) {
        .bs = bs,
        .drain_type = drain_type,
        .aio_ret = &aio_ret,
    };

    switch (drain_thread) {
    case 0:
        /*
         * Increment in_flight so that do_drain_begin() waits for
         * test_iothread_main_thread_bh(). This prevents the race between
         * test_iothread_main_thread_bh() in IOThread a and do_drain_begin() in
         * this thread. test_iothread_main_thread_bh() decrements in_flight.
         */
        bdrv_inc_in_flight(bs);
        aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);

        /* The request is running on the IOThread a. Draining its block device
         * will make sure that it has completed as far as the BDS is concerned,
         * but the drain in this thread can continue immediately after
         * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
         * later. */
        do_drain_begin(drain_type, bs);
        g_assert_cmpint(bs->in_flight, ==, 0);

        /* Wait for test_iothread_aio_cb() before checking the result */
        qemu_event_wait(&done_event);

        g_assert_cmpint(aio_ret, ==, 0);
        do_drain_end(drain_type, bs);
        break;
    case 1:
        /* Drain from a coroutine running in IOThread a instead */
        co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);
        aio_co_enter(ctx_a, co);
        AIO_WAIT_WHILE_UNLOCKED(NULL, !data.co_done);
        break;
    default:
        g_assert_not_reached();
    }

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);

out:
    iothread_join(a);
    iothread_join(b);
}
616da668aa1SThomas Huth
/*
 * BDRV_DRAIN_ALL with requests running in an IOThread; run once draining
 * from the main thread (drain_thread 0) and once from a coroutine entered
 * in the IOThread's context (drain_thread 1).
 */
static void test_iothread_drain_all(void)
{
    test_iothread_common(BDRV_DRAIN_ALL, 0);
    test_iothread_common(BDRV_DRAIN_ALL, 1);
}
622da668aa1SThomas Huth
/* Same as test_iothread_drain_all(), but with single-node BDRV_DRAIN */
static void test_iothread_drain(void)
{
    test_iothread_common(BDRV_DRAIN, 0);
    test_iothread_common(BDRV_DRAIN, 1);
}
628da668aa1SThomas Huth
629da668aa1SThomas Huth
/* State of the dummy block job used by the blockjob drain tests */
typedef struct TestBlockJob {
    BlockJob common;
    BlockDriverState *bs;   /* node flushed by the prepare/commit/abort hooks */
    int run_ret;            /* return value for .run (0 or negative errno) */
    int prepare_ret;        /* return value for .prepare (0 or negative errno) */
    bool running;           /* set once .run is past the initial pause point */
    bool should_complete;   /* set by .complete to let the .run loop finish */
} TestBlockJob;
638da668aa1SThomas Huth
/*
 * .prepare hook: flush the job's node and return the preconfigured result.
 */
static int test_job_prepare(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
    return s->prepare_ret;
}
647da668aa1SThomas Huth
/* .commit hook: flush only; no state to commit in this dummy job. */
static void test_job_commit(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}
655da668aa1SThomas Huth
/* .abort hook: flush only; no state to roll back in this dummy job. */
static void test_job_abort(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}
663da668aa1SThomas Huth
/*
 * Job main loop: mark the job running and ready, then busy-wait (while
 * honouring pause points) until test_job_complete() sets should_complete.
 * Returns the preconfigured run_ret.
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* We are running the actual job code past the pause point in
     * job_co_entry(). */
    s->running = true;

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that drain
         * has to wait for this activity to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        /* Let drain pause us here */
        job_pause_point(&s->common.job);
    }

    return s->run_ret;
}
684da668aa1SThomas Huth
/* .complete hook: ask the test_job_run() loop to terminate. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
690da668aa1SThomas Huth
/*
 * Driver for the dummy test job. Each of prepare/commit/abort flushes the
 * job's node to provoke AIO_WAIT_WHILE() and check for drain deadlocks.
 * NOTE(review): not declared static in this view -- confirm whether another
 * translation unit references it before changing the linkage.
 */
BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
        .commit         = test_job_commit,
        .abort          = test_job_abort,
    },
};
703da668aa1SThomas Huth
/* Desired outcome of the test job: succeed, fail in .run, or fail in .prepare */
enum test_job_result {
    TEST_JOB_SUCCESS,
    TEST_JOB_FAIL_RUN,
    TEST_JOB_FAIL_PREPARE,
};
709da668aa1SThomas Huth
/* Which node of the source chain the test drains */
enum test_job_drain_node {
    TEST_JOB_DRAIN_SRC,         /* drain the job's main node */
    TEST_JOB_DRAIN_SRC_CHILD,   /* drain its backing node */
};
714da668aa1SThomas Huth
/*
 * Core block job drain test: build a three-node source chain and a target
 * node, start a TestBlockJob over them (optionally with the source in an
 * IOThread), then drain first @drain_node and afterwards the target, each
 * time checking that the job pauses exactly as expected and resumes when
 * the drained section ends. Finally the job is completed synchronously and
 * its return value is checked against @result.
 */
static void test_blockjob_common_drain_node(enum drain_type drain_type,
                                            bool use_iothread,
                                            enum test_job_result result,
                                            enum test_job_drain_node drain_node)
{
    BlockBackend *blk_src, *blk_target;
    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
    BlockJob *job;
    TestBlockJob *tjob;
    IOThread *iothread = NULL;
    int ret = -1;

    /* Source chain: source-overlay -> source -> source-backing */
    src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                               &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
                                       BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
                                       BDRV_O_RDWR, &error_abort);

    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);

    blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_src, src_overlay, &error_abort);

    switch (drain_node) {
    case TEST_JOB_DRAIN_SRC:
        drain_bs = src;
        break;
    case TEST_JOB_DRAIN_SRC_CHILD:
        drain_bs = src_backing;
        break;
    default:
        g_assert_not_reached();
    }

    if (use_iothread) {
        AioContext *ctx;

        iothread = iothread_new();
        ctx = iothread_get_aio_context(iothread);
        blk_set_aio_context(blk_src, ctx, &error_abort);
    }

    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);
    blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_target, target, &error_abort);
    blk_set_allow_aio_context_change(blk_target, true);

    tjob = block_job_create("job0", &test_job_driver, NULL, src,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    tjob->bs = src;
    job = &tjob->common;

    /* Graph change: take the writer lock while adding the target node */
    bdrv_graph_wrlock();
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
    bdrv_graph_wrunlock();

    switch (result) {
    case TEST_JOB_SUCCESS:
        break;
    case TEST_JOB_FAIL_RUN:
        tjob->run_ret = -EIO;
        break;
    case TEST_JOB_FAIL_PREPARE:
        tjob->prepare_ret = -EIO;
        break;
    }

    job_start(&job->job);

    if (use_iothread) {
        /* job_co_entry() is run in the I/O thread, wait for the actual job
         * code to start (we don't want to catch the job in the pause point
         * in job_co_entry()). */
        while (!tjob->running) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(tjob->running);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, drain_bs);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, drain_bs);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    /* Second round: drain the target node instead of the source chain */
    do_drain_begin_unlocked(drain_type, target);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, target);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    WITH_JOB_LOCK_GUARD() {
        ret = job_complete_sync_locked(&job->job, &error_abort);
    }
    g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

    if (use_iothread) {
        /* Move everything back to the main context before tearing down */
        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
        assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
    }

    blk_unref(blk_src);
    blk_unref(blk_target);
    bdrv_unref(src_overlay);
    bdrv_unref(target);

    if (iothread) {
        iothread_join(iothread);
    }
}
891da668aa1SThomas Huth
/* Run the common job drain scenario for both possible drain nodes */
static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
                                 enum test_job_result result)
{
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC);
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC_CHILD);
}
900da668aa1SThomas Huth
/* bdrv_drain_all() over a running, succeeding job, main context only */
static void test_blockjob_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}
905da668aa1SThomas Huth
/* Single-node bdrv_drain() over a running, succeeding job */
static void test_blockjob_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}
910da668aa1SThomas Huth
/* bdrv_drain_all() over jobs that fail in .run and in .prepare */
static void test_blockjob_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
}
916da668aa1SThomas Huth
/* Single-node drain over jobs that fail in .run and in .prepare */
static void test_blockjob_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}
922da668aa1SThomas Huth
/* As test_blockjob_drain_all(), with the source running in an IOThread */
static void test_blockjob_iothread_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}
927da668aa1SThomas Huth
/* As test_blockjob_drain(), with the source running in an IOThread */
static void test_blockjob_iothread_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}
932da668aa1SThomas Huth
/* As test_blockjob_error_drain_all(), with the source in an IOThread */
static void test_blockjob_iothread_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
}
938da668aa1SThomas Huth
/* As test_blockjob_error_drain(), with the source in an IOThread */
static void test_blockjob_iothread_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}
944da668aa1SThomas Huth
945da668aa1SThomas Huth
/* Per-node state of the "test_top_driver" filter node */
typedef struct BDRVTestTopState {
    BdrvChild *wait_child;      /* child that reads are forwarded to */
} BDRVTestTopState;
949da668aa1SThomas Huth
/* .bdrv_close: drop all child references under the graph writer lock. */
static void bdrv_test_top_close(BlockDriverState *bs)
{
    BdrvChild *c, *next_c;

    bdrv_graph_wrlock();
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_unref_child(bs, c);
    }
    bdrv_graph_wrunlock();
}
960da668aa1SThomas Huth
/* .bdrv_co_preadv: forward the read to the configured wait_child. */
static int coroutine_fn GRAPH_RDLOCK
bdrv_test_top_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVTestTopState *tts = bs->opaque;
    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
}
968da668aa1SThomas Huth
/* Filter driver whose reads block on a designated child (see wait_child) */
static BlockDriver bdrv_test_top_driver = {
    .format_name            = "test_top_driver",
    .instance_size          = sizeof(BDRVTestTopState),

    .bdrv_close             = bdrv_test_top_close,
    .bdrv_co_preadv         = bdrv_test_top_co_preadv,

    .bdrv_child_perm        = bdrv_default_perms,
};
978da668aa1SThomas Huth
/* Parameters/result for test_co_delete_by_drain() */
typedef struct TestCoDeleteByDrainData {
    BlockBackend *blk;
    bool detach_instead_of_delete;  /* detach children rather than delete bs */
    bool done;                      /* set when the coroutine has finished */
} TestCoDeleteByDrainData;
984da668aa1SThomas Huth
/*
 * Coroutine that issues a read through the top node's wait-child, then
 * (depending on dbdd->detach_instead_of_delete) either drops the BB
 * reference -- deleting the whole node tree -- or detaches all children of
 * the top node. Sets dbdd->done when finished.
 */
static void coroutine_fn test_co_delete_by_drain(void *opaque)
{
    TestCoDeleteByDrainData *dbdd = opaque;
    BlockBackend *blk = dbdd->blk;
    BlockDriverState *bs = blk_bs(blk);
    BDRVTestTopState *tts = bs->opaque;
    void *buffer = g_malloc(65536);
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);

    /* Pretend some internal write operation from parent to child.
     * Important: We have to read from the child, not from the parent!
     * Draining works by first propagating it all up the tree to the
     * root and then waiting for drainage from root to the leaves
     * (protocol nodes). If we have a request waiting on the root,
     * everything will be drained before we go back down the tree, but
     * we do not want that. We want to be in the middle of draining
     * when the following request returns. */
    bdrv_graph_co_rdlock();
    bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
    bdrv_graph_co_rdunlock();

    g_assert_cmpint(bs->refcnt, ==, 1);

    if (!dbdd->detach_instead_of_delete) {
        /* Dropping the last reference deletes bs and its children */
        blk_co_unref(blk);
    } else {
        BdrvChild *c, *next_c;
        bdrv_graph_co_rdlock();
        QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
            /* Temporarily drop the graph read lock around the unref call */
            bdrv_graph_co_rdunlock();
            bdrv_co_unref_child(bs, c);
            bdrv_graph_co_rdlock();
        }
        bdrv_graph_co_rdunlock();
    }

    dbdd->done = true;
    g_free(buffer);
}
1024da668aa1SThomas Huth
/**
 * Test what happens when some BDS has some children, you drain one of
 * them and this results in the BDS being deleted.
 *
 * If @detach_instead_of_delete is set, the BDS is not going to be
 * deleted but will only detach all of its children.
 */
static void do_test_delete_by_drain(bool detach_instead_of_delete,
                                    enum drain_type drain_type)
{
    BlockBackend *blk;
    BlockDriverState *bs, *child_bs, *null_bs;
    BDRVTestTopState *tts;
    TestCoDeleteByDrainData dbdd;
    Coroutine *co;

    bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
                              &error_abort);
    bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
    tts = bs->opaque;

    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_graph_wrlock();
    bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
                      BDRV_CHILD_DATA, &error_abort);
    bdrv_graph_wrunlock();

    /* This child will be the one to pass to requests through to, and
     * it will stall until a drain occurs */
    child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
                                    &error_abort);
    child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
    /* Takes our reference to child_bs */
    bdrv_graph_wrlock();
    tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
                                        &child_of_bds,
                                        BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
                                        &error_abort);
    bdrv_graph_wrunlock();

    /* This child is just there to be deleted
     * (for detach_instead_of_delete == true) */
    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_graph_wrlock();
    bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
                      &error_abort);
    bdrv_graph_wrunlock();

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, bs, &error_abort);

    /* Referenced by blk now */
    bdrv_unref(bs);

    g_assert_cmpint(bs->refcnt, ==, 1);
    g_assert_cmpint(child_bs->refcnt, ==, 1);
    g_assert_cmpint(null_bs->refcnt, ==, 1);


    dbdd = (TestCoDeleteByDrainData){
        .blk = blk,
        .detach_instead_of_delete = detach_instead_of_delete,
        .done = false,
    };
    co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
    qemu_coroutine_enter(co);

    /* Drain the child while the read operation is still pending.
     * This should result in the operation finishing and
     * test_co_delete_by_drain() resuming. Thus, @bs will be deleted
     * and the coroutine will exit while this drain operation is still
     * in progress. */
    switch (drain_type) {
    case BDRV_DRAIN:
        /* Keep child_bs alive across the drain */
        bdrv_ref(child_bs);
        bdrv_drain(child_bs);
        bdrv_unref(child_bs);
        break;
    case BDRV_DRAIN_ALL:
        bdrv_drain_all_begin();
        bdrv_drain_all_end();
        break;
    default:
        g_assert_not_reached();
    }

    while (!dbdd.done) {
        aio_poll(qemu_get_aio_context(), true);
    }

    if (detach_instead_of_delete) {
        /* Here, the reference has not passed over to the coroutine,
         * so we have to delete the BB ourselves */
        blk_unref(blk);
    }
}
1123da668aa1SThomas Huth
/* Deleting the whole tree from within a single-node drain */
static void test_delete_by_drain(void)
{
    do_test_delete_by_drain(false, BDRV_DRAIN);
}
1128da668aa1SThomas Huth
/* Detaching all children from within bdrv_drain_all() */
static void test_detach_by_drain_all(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
}
1133da668aa1SThomas Huth
/* Detaching all children from within a single-node drain */
static void test_detach_by_drain(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN);
}
1138da668aa1SThomas Huth
1139da668aa1SThomas Huth
/* Shared state for the detach-by-parent tests below */
struct detach_by_parent_data {
    BlockDriverState *parent_b;
    BdrvChild *child_b;         /* child to remove from parent_b */
    BlockDriverState *c;        /* node to attach in its place */
    BdrvChild *child_c;         /* resulting new child */
    bool by_parent_cb;          /* trigger graph change from the AIO callback */
    bool detach_on_drain;       /* trigger graph change from .drained_begin */
};
static struct detach_by_parent_data detach_by_parent_data;
1149da668aa1SThomas Huth
/*
 * BH performing the actual graph change: remove child B from parent-b and
 * attach C instead. Balances the bdrv_inc_in_flight() done by whoever
 * scheduled this BH.
 */
static void no_coroutine_fn detach_indirect_bh(void *opaque)
{
    struct detach_by_parent_data *data = opaque;

    bdrv_dec_in_flight(data->child_b->bs);

    bdrv_graph_wrlock();
    bdrv_unref_child(data->parent_b, data->child_b);

    bdrv_ref(data->c);
    data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
                                      &child_of_bds, BDRV_CHILD_DATA,
                                      &error_abort);
    bdrv_graph_wrunlock();
}
1165da668aa1SThomas Huth
/*
 * AIO completion callback: in by_parent_cb mode, schedule the graph change
 * in a BH rather than performing it directly from the callback.
 */
static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
{
    struct detach_by_parent_data *data = &detach_by_parent_data;

    g_assert_cmpint(ret, ==, 0);
    if (data->by_parent_cb) {
        /* Keep child_b->bs busy until detach_indirect_bh() has run */
        bdrv_inc_in_flight(data->child_b->bs);
        aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                                detach_indirect_bh, &detach_by_parent_data);
    }
}
1177da668aa1SThomas Huth
/*
 * .drained_begin override: on the first drain only, schedule the BH that
 * changes the graph, then chain to the default child_of_bds handler.
 */
static void GRAPH_RDLOCK detach_by_driver_cb_drained_begin(BdrvChild *child)
{
    struct detach_by_parent_data *data = &detach_by_parent_data;

    if (!data->detach_on_drain) {
        return;
    }
    /* One-shot: only the first drain triggers the graph change */
    data->detach_on_drain = false;

    /* Keep child_b->bs busy until detach_indirect_bh() has run */
    bdrv_inc_in_flight(data->child_b->bs);
    aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                            detach_indirect_bh, &detach_by_parent_data);
    child_of_bds.drained_begin(child);
}
1192da668aa1SThomas Huth
1193da668aa1SThomas Huth static BdrvChildClass detach_by_driver_cb_class;
1194da668aa1SThomas Huth
/*
 * Initial graph:
 *
 * PA     PB
 *    \ /   \
 *     A     B     C
 *
 * by_parent_cb == true:  Test that parent callbacks don't poll
 *
 *     PA has a pending write request whose callback changes the child nodes of
 *     PB: It removes B and adds C instead. The subtree of PB is drained, which
 *     will indirectly drain the write request, too.
 *
 * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll
 *
 *     PA's BdrvChildClass has a .drained_begin callback that schedules a BH
 *     that does the same graph change. If bdrv_drain_invoke() calls it, the
 *     state is messed up, but if it is only polled in the single
 *     BDRV_POLL_WHILE() at the end of the drain, this should work fine.
 */
/*
 * TSA_NO_TSA: thread-safety analysis is suppressed; the graph write lock
 * is taken manually around the bdrv_attach_child() calls below.
 */
static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
{
    BlockBackend *blk;
    BlockDriverState *parent_a, *parent_b, *a, *b, *c;
    BdrvChild *child_a, *child_b;
    BlockAIOCB *acb;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    if (!by_parent_cb) {
        /*
         * Patch a copy of child_of_bds so that draining PA's child runs
         * the graph-changing callback; drained_end/drained_poll are not
         * needed for this test and are cleared.
         */
        detach_by_driver_cb_class = child_of_bds;
        detach_by_driver_cb_class.drained_begin =
            detach_by_driver_cb_drained_begin;
        detach_by_driver_cb_class.drained_end = NULL;
        detach_by_driver_cb_class.drained_poll = NULL;
    }

    /* Keep the detach callback inert until the evil request is set up */
    detach_by_parent_data = (struct detach_by_parent_data) {
        .detach_on_drain = false,
    };

    /* Create all involved nodes */
    parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
                                    &error_abort);
    parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
                                    &error_abort);

    a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
    b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
    c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);

    /* blk is a BB for parent-a */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, parent_a, &error_abort);
    bdrv_unref(parent_a);

    /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver
     * callback must not return immediately. */
    if (!by_parent_cb) {
        BDRVTestState *s = parent_a->opaque;
        s->sleep_in_drain_begin = true;
    }

    /* Set child relationships */
    bdrv_ref(b);
    bdrv_ref(a);
    bdrv_graph_wrlock();
    child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
                                BDRV_CHILD_DATA, &error_abort);
    child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
                                BDRV_CHILD_COW, &error_abort);

    bdrv_ref(a);
    bdrv_attach_child(parent_a, a, "PA-A",
                      by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
                      BDRV_CHILD_DATA, &error_abort);
    bdrv_graph_wrunlock();

    g_assert_cmpint(parent_a->refcnt, ==, 1);
    g_assert_cmpint(parent_b->refcnt, ==, 1);
    g_assert_cmpint(a->refcnt, ==, 3);
    g_assert_cmpint(b->refcnt, ==, 2);
    g_assert_cmpint(c->refcnt, ==, 1);

    /* Children were attached B first, A second; the list is LIFO */
    g_assert(QLIST_FIRST(&parent_b->children) == child_a);
    g_assert(QLIST_NEXT(child_a, next) == child_b);
    g_assert(QLIST_NEXT(child_b, next) == NULL);

    /* Start the evil write request */
    detach_by_parent_data = (struct detach_by_parent_data) {
        .parent_b = parent_b,
        .child_b = child_b,
        .c = c,
        .by_parent_cb = by_parent_cb,
        .detach_on_drain = true,
    };
    acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
    g_assert(acb != NULL);

    /* Drain and check the expected result */
    bdrv_drained_begin(parent_b);
    bdrv_drained_begin(a);
    bdrv_drained_begin(b);
    bdrv_drained_begin(c);

    /* The graph change must have happened: child_c replaced child_b */
    g_assert(detach_by_parent_data.child_c != NULL);

    g_assert_cmpint(parent_a->refcnt, ==, 1);
    g_assert_cmpint(parent_b->refcnt, ==, 1);
    g_assert_cmpint(a->refcnt, ==, 3);
    g_assert_cmpint(b->refcnt, ==, 1);
    g_assert_cmpint(c->refcnt, ==, 2);

    g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
    g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
    g_assert(QLIST_NEXT(child_a, next) == NULL);

    /*
     * parent_b is quiesced once directly and once for each of its
     * remaining children a and c; b was detached before its drain.
     */
    g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
    g_assert_cmpint(parent_b->quiesce_counter, ==, 3);
    g_assert_cmpint(a->quiesce_counter, ==, 1);
    g_assert_cmpint(b->quiesce_counter, ==, 1);
    g_assert_cmpint(c->quiesce_counter, ==, 1);

    bdrv_drained_end(parent_b);
    bdrv_drained_end(a);
    bdrv_drained_end(b);
    bdrv_drained_end(c);

    bdrv_unref(parent_b);
    blk_unref(blk);

    g_assert_cmpint(a->refcnt, ==, 1);
    g_assert_cmpint(b->refcnt, ==, 1);
    g_assert_cmpint(c->refcnt, ==, 1);
    bdrv_unref(a);
    bdrv_unref(b);
    bdrv_unref(c);
}
1333da668aa1SThomas Huth
test_detach_by_parent_cb(void)1334da668aa1SThomas Huth static void test_detach_by_parent_cb(void)
1335da668aa1SThomas Huth {
1336da668aa1SThomas Huth test_detach_indirect(true);
1337da668aa1SThomas Huth }
1338da668aa1SThomas Huth
test_detach_by_driver_cb(void)1339da668aa1SThomas Huth static void test_detach_by_driver_cb(void)
1340da668aa1SThomas Huth {
1341da668aa1SThomas Huth test_detach_indirect(false);
1342da668aa1SThomas Huth }
1343da668aa1SThomas Huth
/*
 * Test that bdrv_append() onto a node inside a drained section keeps the
 * counters consistent: the new overlay must come out exactly as drained
 * as the base, and undraining the base must undrain the overlay, too.
 */
static void test_append_to_drained(void)
{
    BlockBackend *blk;
    BlockDriverState *base, *overlay;
    BDRVTestState *base_s, *overlay_s;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    base_s = base->opaque;
    blk_insert_bs(blk, base, &error_abort);

    overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR,
                                   &error_abort);
    overlay_s = overlay->opaque;

    /* Drain base before the graph change */
    do_drain_begin(BDRV_DRAIN, base);
    g_assert_cmpint(base->quiesce_counter, ==, 1);
    g_assert_cmpint(base_s->drain_count, ==, 1);
    g_assert_cmpint(base->in_flight, ==, 0);

    bdrv_append(overlay, base, &error_abort);

    /* bdrv_append() must not leave stray in-flight requests behind */
    g_assert_cmpint(base->in_flight, ==, 0);
    g_assert_cmpint(overlay->in_flight, ==, 0);

    /* The overlay has inherited the drained section from base */
    g_assert_cmpint(base->quiesce_counter, ==, 1);
    g_assert_cmpint(base_s->drain_count, ==, 1);
    g_assert_cmpint(overlay->quiesce_counter, ==, 1);
    g_assert_cmpint(overlay_s->drain_count, ==, 1);

    do_drain_end(BDRV_DRAIN, base);

    /* Ending the drain on base undrains the overlay as well */
    g_assert_cmpint(base->quiesce_counter, ==, 0);
    g_assert_cmpint(base_s->drain_count, ==, 0);
    g_assert_cmpint(overlay->quiesce_counter, ==, 0);
    g_assert_cmpint(overlay_s->drain_count, ==, 0);

    bdrv_unref(overlay);
    bdrv_unref(base);
    blk_unref(blk);
}
1385da668aa1SThomas Huth
/*
 * Test that a node can be moved between AioContexts (the main loop and
 * two iothreads) while it is inside a drained section.
 */
static void test_set_aio_context(void)
{
    BlockDriverState *bs;
    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);

    /* Move from the main context to iothread a while drained */
    bdrv_drained_begin(bs);
    bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);
    bdrv_drained_end(bs);

    /* Two moves (a -> b -> main context) within a single drained section */
    bdrv_drained_begin(bs);
    bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
    bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
    bdrv_drained_end(bs);

    bdrv_unref(bs);
    iothread_join(a);
    iothread_join(b);
}
1410da668aa1SThomas Huth
1411da668aa1SThomas Huth
typedef struct TestDropBackingBlockJob {
    BlockJob common;
    bool should_complete;           /* set by the test to let .run() finish */
    bool *did_complete;             /* set to true by .commit() */
    BlockDriverState *detach_also;  /* second node whose backing is dropped */
    BlockDriverState *bs;           /* the job's main node */
} TestDropBackingBlockJob;
1419da668aa1SThomas Huth
test_drop_backing_job_run(Job * job,Error ** errp)1420da668aa1SThomas Huth static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
1421da668aa1SThomas Huth {
1422da668aa1SThomas Huth TestDropBackingBlockJob *s =
1423da668aa1SThomas Huth container_of(job, TestDropBackingBlockJob, common.job);
1424da668aa1SThomas Huth
1425da668aa1SThomas Huth while (!s->should_complete) {
1426da668aa1SThomas Huth job_sleep_ns(job, 0);
1427da668aa1SThomas Huth }
1428da668aa1SThomas Huth
1429da668aa1SThomas Huth return 0;
1430da668aa1SThomas Huth }
1431da668aa1SThomas Huth
/*
 * Job .commit() callback: drops the backing file of the job's own node
 * first, then of detach_also, and records completion.  See the comment
 * on test_blockjob_commit_by_drained_end() for why this is interesting.
 */
static void test_drop_backing_job_commit(Job *job)
{
    TestDropBackingBlockJob *s =
        container_of(job, TestDropBackingBlockJob, common.job);

    bdrv_set_backing_hd(s->bs, NULL, &error_abort);
    bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);

    *s->did_complete = true;
}
1442da668aa1SThomas Huth
/* Minimal block job that detaches backing files in its commit phase */
static const BlockJobDriver test_drop_backing_job_driver = {
    .job_driver = {
        .instance_size = sizeof(TestDropBackingBlockJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = test_drop_backing_job_run,
        .commit        = test_drop_backing_job_commit,
    }
};
1452da668aa1SThomas Huth
1453da668aa1SThomas Huth /**
1454da668aa1SThomas Huth * Creates a child node with three parent nodes on it, and then runs a
1455da668aa1SThomas Huth * block job on the final one, parent-node-2.
1456da668aa1SThomas Huth *
1457da668aa1SThomas Huth * The job is then asked to complete before a section where the child
1458da668aa1SThomas Huth * is drained.
1459da668aa1SThomas Huth *
1460da668aa1SThomas Huth * Ending this section will undrain the child's parents, first
1461da668aa1SThomas Huth * parent-node-2, then parent-node-1, then parent-node-0 -- the parent
1462da668aa1SThomas Huth * list is in reverse order of how they were added. Ending the drain
1463da668aa1SThomas Huth * on parent-node-2 will resume the job, thus completing it and
1464da668aa1SThomas Huth * scheduling job_exit().
1465da668aa1SThomas Huth *
1466da668aa1SThomas Huth * Ending the drain on parent-node-1 will poll the AioContext, which
1467da668aa1SThomas Huth * lets job_exit() and thus test_drop_backing_job_commit() run. That
1468da668aa1SThomas Huth * function first removes the child as parent-node-2's backing file.
1469da668aa1SThomas Huth *
1470da668aa1SThomas Huth * In old (and buggy) implementations, there are two problems with
1471da668aa1SThomas Huth * that:
1472da668aa1SThomas Huth * (A) bdrv_drain_invoke() polls for every node that leaves the
1473da668aa1SThomas Huth * drained section. This means that job_exit() is scheduled
1474da668aa1SThomas Huth * before the child has left the drained section. Its
1475da668aa1SThomas Huth * quiesce_counter is therefore still 1 when it is removed from
1476da668aa1SThomas Huth * parent-node-2.
1477da668aa1SThomas Huth *
1478da668aa1SThomas Huth * (B) bdrv_replace_child_noperm() calls drained_end() on the old
1479da668aa1SThomas Huth * child's parents as many times as the child is quiesced. This
1480da668aa1SThomas Huth * means it will call drained_end() on parent-node-2 once.
1481da668aa1SThomas Huth * Because parent-node-2 is no longer quiesced at this point, this
1482da668aa1SThomas Huth * will fail.
1483da668aa1SThomas Huth *
1484da668aa1SThomas Huth * bdrv_replace_child_noperm() therefore must call drained_end() on
1485da668aa1SThomas Huth * the parent only if it really is still drained because the child is
1486da668aa1SThomas Huth * drained.
1487da668aa1SThomas Huth *
1488da668aa1SThomas Huth * If removing child from parent-node-2 was successful (as it should
1489da668aa1SThomas Huth * be), test_drop_backing_job_commit() will then also remove the child
1490da668aa1SThomas Huth * from parent-node-0.
1491da668aa1SThomas Huth *
1492da668aa1SThomas Huth * With an old version of our drain infrastructure ((A) above), that
1493da668aa1SThomas Huth * resulted in the following flow:
1494da668aa1SThomas Huth *
1495da668aa1SThomas Huth * 1. child attempts to leave its drained section. The call recurses
1496da668aa1SThomas Huth * to its parents.
1497da668aa1SThomas Huth *
1498da668aa1SThomas Huth * 2. parent-node-2 leaves the drained section. Polling in
1499da668aa1SThomas Huth * bdrv_drain_invoke() will schedule job_exit().
1500da668aa1SThomas Huth *
1501da668aa1SThomas Huth * 3. parent-node-1 leaves the drained section. Polling in
1502da668aa1SThomas Huth * bdrv_drain_invoke() will run job_exit(), thus disconnecting
1503da668aa1SThomas Huth * parent-node-0 from the child node.
1504da668aa1SThomas Huth *
1505da668aa1SThomas Huth * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to
1506da668aa1SThomas Huth * iterate over the parents. Thus, it now accesses the BdrvChild
1507da668aa1SThomas Huth * object that used to connect parent-node-0 and the child node.
1508da668aa1SThomas Huth * However, that object no longer exists, so it accesses a dangling
1509da668aa1SThomas Huth * pointer.
1510da668aa1SThomas Huth *
1511da668aa1SThomas Huth * The solution is to only poll once when running a bdrv_drained_end()
1512da668aa1SThomas Huth * operation, specifically at the end when all drained_end()
1513da668aa1SThomas Huth * operations for all involved nodes have been scheduled.
1514da668aa1SThomas Huth * Note that this also solves (A) above, thus hiding (B).
1515da668aa1SThomas Huth */
static void test_blockjob_commit_by_drained_end(void)
{
    BlockDriverState *bs_child, *bs_parents[3];
    TestDropBackingBlockJob *job;
    bool job_has_completed = false;
    int i;

    bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR,
                                    &error_abort);

    /* Three parents, each with bs_child as its backing file */
    for (i = 0; i < 3; i++) {
        char name[32];
        snprintf(name, sizeof(name), "parent-node-%i", i);
        bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
                                             &error_abort);
        bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort);
    }

    /* The job runs on the last parent, parent-node-2 */
    job = block_job_create("job", &test_drop_backing_job_driver, NULL,
                           bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
                           &error_abort);
    job->bs = bs_parents[2];

    job->detach_also = bs_parents[0];
    job->did_complete = &job_has_completed;

    job_start(&job->common.job);

    /* While bs_child is drained, the job is paused and cannot finish */
    job->should_complete = true;
    bdrv_drained_begin(bs_child);
    g_assert(!job_has_completed);
    bdrv_drained_end(bs_child);
    /* One poll lets the scheduled job_exit() run and commit the job */
    aio_poll(qemu_get_aio_context(), false);
    g_assert(job_has_completed);

    bdrv_unref(bs_parents[0]);
    bdrv_unref(bs_parents[1]);
    bdrv_unref(bs_parents[2]);
    bdrv_unref(bs_child);
}
1556da668aa1SThomas Huth
1557da668aa1SThomas Huth
typedef struct TestSimpleBlockJob {
    BlockJob common;
    bool should_complete;   /* set by the test to let .run() finish */
    bool *did_complete;     /* set to true by .clean() */
} TestSimpleBlockJob;
1563da668aa1SThomas Huth
test_simple_job_run(Job * job,Error ** errp)1564da668aa1SThomas Huth static int coroutine_fn test_simple_job_run(Job *job, Error **errp)
1565da668aa1SThomas Huth {
1566da668aa1SThomas Huth TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1567da668aa1SThomas Huth
1568da668aa1SThomas Huth while (!s->should_complete) {
1569da668aa1SThomas Huth job_sleep_ns(job, 0);
1570da668aa1SThomas Huth }
1571da668aa1SThomas Huth
1572da668aa1SThomas Huth return 0;
1573da668aa1SThomas Huth }
1574da668aa1SThomas Huth
/* Job .clean() callback: records for the test that the job finished */
static void test_simple_job_clean(Job *job)
{
    TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
    *s->did_complete = true;
}
1580da668aa1SThomas Huth
/* Minimal block job that only signals its completion */
static const BlockJobDriver test_simple_job_driver = {
    .job_driver = {
        .instance_size = sizeof(TestSimpleBlockJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = test_simple_job_run,
        .clean         = test_simple_job_clean,
    },
};
1590da668aa1SThomas Huth
/*
 * .update_filename() implementation that deliberately polls the current
 * AioContext twice; if the subtree is not drained, this lets a pending
 * block job finish and change the graph mid-operation.
 */
static int drop_intermediate_poll_update_filename(BdrvChild *child,
                                                  BlockDriverState *new_base,
                                                  const char *filename,
                                                  bool backing_mask_protocol,
                                                  Error **errp)
{
    /*
     * We are free to poll here, which may change the block graph, if
     * it is not drained.
     */

    /* If the job is not drained: Complete it, schedule job_exit() */
    aio_poll(qemu_get_current_aio_context(), false);
    /* If the job is not drained: Run job_exit(), finish the job */
    aio_poll(qemu_get_current_aio_context(), false);

    return 0;
}
1609da668aa1SThomas Huth
1610da668aa1SThomas Huth /**
1611da668aa1SThomas Huth * Test a poll in the midst of bdrv_drop_intermediate().
1612da668aa1SThomas Huth *
1613da668aa1SThomas Huth * bdrv_drop_intermediate() calls BdrvChildClass.update_filename(),
1614da668aa1SThomas Huth * which can yield or poll. This may lead to graph changes, unless
1615da668aa1SThomas Huth * the whole subtree in question is drained.
1616da668aa1SThomas Huth *
1617da668aa1SThomas Huth * We test this on the following graph:
1618da668aa1SThomas Huth *
1619da668aa1SThomas Huth * Job
1620da668aa1SThomas Huth *
1621da668aa1SThomas Huth * |
1622da668aa1SThomas Huth * job-node
1623da668aa1SThomas Huth * |
1624da668aa1SThomas Huth * v
1625da668aa1SThomas Huth *
1626da668aa1SThomas Huth * job-node
1627da668aa1SThomas Huth *
1628da668aa1SThomas Huth * |
1629da668aa1SThomas Huth * backing
1630da668aa1SThomas Huth * |
1631da668aa1SThomas Huth * v
1632da668aa1SThomas Huth *
1633da668aa1SThomas Huth * node-2 --chain--> node-1 --chain--> node-0
1634da668aa1SThomas Huth *
1635da668aa1SThomas Huth * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0).
1636da668aa1SThomas Huth *
1637da668aa1SThomas Huth * This first updates node-2's backing filename by invoking
1638da668aa1SThomas Huth * drop_intermediate_poll_update_filename(), which polls twice. This
1639da668aa1SThomas Huth * causes the job to finish, which in turns causes the job-node to be
1640da668aa1SThomas Huth * deleted.
1641da668aa1SThomas Huth *
1642da668aa1SThomas Huth * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it
1643da668aa1SThomas Huth * already has a pointer to the BdrvChild edge between job-node and
1644da668aa1SThomas Huth * node-1. When it tries to handle that edge, we probably get a
1645da668aa1SThomas Huth * segmentation fault because the object no longer exists.
1646da668aa1SThomas Huth *
1647da668aa1SThomas Huth *
1648da668aa1SThomas Huth * The solution is for bdrv_drop_intermediate() to drain top's
1649da668aa1SThomas Huth * subtree. This prevents graph changes from happening just because
1650da668aa1SThomas Huth * BdrvChildClass.update_filename() yields or polls. Thus, the block
1651da668aa1SThomas Huth * job is paused during that drained section and must finish before or
1652da668aa1SThomas Huth * after.
1653da668aa1SThomas Huth *
1654da668aa1SThomas Huth * (In addition, bdrv_replace_child() must keep the job paused.)
1655da668aa1SThomas Huth */
static void test_drop_intermediate_poll(void)
{
    static BdrvChildClass chain_child_class;
    BlockDriverState *chain[3];
    TestSimpleBlockJob *job;
    BlockDriverState *job_node;
    bool job_has_completed = false;
    int i;
    int ret;

    /* Like child_of_bds, but with a polling .update_filename() */
    chain_child_class = child_of_bds;
    chain_child_class.update_filename = drop_intermediate_poll_update_filename;

    for (i = 0; i < 3; i++) {
        char name[32];
        snprintf(name, 32, "node-%i", i);

        chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort);
    }

    job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR,
                                    &error_abort);
    bdrv_set_backing_hd(job_node, chain[1], &error_abort);

    /*
     * Establish the chain last, so the chain links are the first
     * elements in the BDS.parents lists
     */
    bdrv_graph_wrlock();
    for (i = 0; i < 3; i++) {
        if (i) {
            /* Takes the reference to chain[i - 1] */
            bdrv_attach_child(chain[i], chain[i - 1], "chain",
                              &chain_child_class, BDRV_CHILD_COW, &error_abort);
        }
    }
    bdrv_graph_wrunlock();

    job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
                           0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);

    /* The job has a reference now */
    bdrv_unref(job_node);

    job->did_complete = &job_has_completed;

    job_start(&job->common.job);
    job->should_complete = true;

    /* The drop operation (plus one poll) must let the job finish cleanly */
    g_assert(!job_has_completed);
    ret = bdrv_drop_intermediate(chain[1], chain[0], NULL, false);
    aio_poll(qemu_get_aio_context(), false);
    g_assert(ret == 0);
    g_assert(job_has_completed);

    bdrv_unref(chain[2]);
}
1713da668aa1SThomas Huth
1714da668aa1SThomas Huth
typedef struct BDRVReplaceTestState {
    bool setup_completed;   /* drain callbacks stay inert until this is set */
    bool was_drained;       /* a drained section was entered (count 0 -> 1) */
    bool was_undrained;     /* a drained section was left (count 1 -> 0) */
    bool has_read;          /* a read request has completed */

    int drain_count;        /* nesting level of drained sections */

    bool yield_before_read; /* make the next read yield once before reading */
    Coroutine *io_co;       /* read coroutine, while yielded in preadv */
    Coroutine *drain_co;    /* coroutine flushing the in-flight read */
} BDRVReplaceTestState;
1727da668aa1SThomas Huth
/* .bdrv_close implementation: the test driver has nothing to clean up */
static void bdrv_replace_test_close(BlockDriverState *bs)
{
}
1731da668aa1SThomas Huth
1732da668aa1SThomas Huth /**
1733da668aa1SThomas Huth * If @bs has a backing file:
1734da668aa1SThomas Huth * Yield if .yield_before_read is true (and wait for drain_begin to
1735da668aa1SThomas Huth * wake us up).
1736da668aa1SThomas Huth * Forward the read to bs->backing. Set .has_read to true.
1737da668aa1SThomas Huth * If drain_begin has woken us, wake it in turn.
1738da668aa1SThomas Huth *
1739da668aa1SThomas Huth * Otherwise:
1740da668aa1SThomas Huth * Set .has_read to true and return success.
1741da668aa1SThomas Huth */
static int coroutine_fn GRAPH_RDLOCK
bdrv_replace_test_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVReplaceTestState *s = bs->opaque;

    if (bs->backing) {
        int ret;

        /* Reads through the backing file must not run while drained */
        g_assert(!s->drain_count);

        s->io_co = qemu_coroutine_self();
        if (s->yield_before_read) {
            s->yield_before_read = false;
            /* Wait for bdrv_replace_test_drain_co() to wake us up */
            qemu_coroutine_yield();
        }
        s->io_co = NULL;

        ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0);
        s->has_read = true;

        /* Wake up drain_co if it runs */
        if (s->drain_co) {
            aio_co_wake(s->drain_co);
        }

        return ret;
    }

    /* No backing file: succeed immediately without any actual I/O */
    s->has_read = true;
    return 0;
}
1774da668aa1SThomas Huth
/*
 * Coroutine spawned by bdrv_replace_test_drain_begin(): repeatedly wakes
 * the pending read coroutine until it has finished, then drops the
 * in-flight reference taken when this coroutine was created.
 */
static void coroutine_fn bdrv_replace_test_drain_co(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVReplaceTestState *s = bs->opaque;

    /* Keep waking io_co up until it is done */
    while (s->io_co) {
        aio_co_wake(s->io_co);
        s->io_co = NULL;
        /* io_co wakes us again once its read has completed */
        qemu_coroutine_yield();
    }
    s->drain_co = NULL;
    bdrv_dec_in_flight(bs);
}
17897bce1c29SKevin Wolf
1790da668aa1SThomas Huth /**
1791da668aa1SThomas Huth * If .drain_count is 0, wake up .io_co if there is one; and set
1792da668aa1SThomas Huth * .was_drained.
1793da668aa1SThomas Huth * Increment .drain_count.
1794da668aa1SThomas Huth */
static void bdrv_replace_test_drain_begin(BlockDriverState *bs)
{
    BDRVReplaceTestState *s = bs->opaque;

    /* Stay inert while the test is still constructing the node */
    if (!s->setup_completed) {
        return;
    }

    if (!s->drain_count) {
        /* Spawn a coroutine that flushes the pending read request */
        s->drain_co = qemu_coroutine_create(bdrv_replace_test_drain_co, bs);
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), s->drain_co);
        s->was_drained = true;
    }
    s->drain_count++;
}
1811da668aa1SThomas Huth
/*
 * Coroutine spawned by bdrv_replace_test_drain_end(): issues a one-byte
 * read through the driver's own preadv implementation and drops the
 * in-flight reference taken when this coroutine was created.
 */
static void coroutine_fn bdrv_replace_test_read_entry(void *opaque)
{
    BlockDriverState *bs = opaque;
    char data;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1);
    int ret;

    /* Queue a read request post-drain */
    bdrv_graph_co_rdlock();
    ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0);
    bdrv_graph_co_rdunlock();

    g_assert(ret >= 0);
    bdrv_dec_in_flight(bs);
}
18277bce1c29SKevin Wolf
1828da668aa1SThomas Huth /**
1829da668aa1SThomas Huth * Reduce .drain_count, set .was_undrained once it reaches 0.
1830da668aa1SThomas Huth * If .drain_count reaches 0 and the node has a backing file, issue a
1831da668aa1SThomas Huth * read request.
1832da668aa1SThomas Huth */
bdrv_replace_test_drain_end(BlockDriverState * bs)18335e8ac217SKevin Wolf static void bdrv_replace_test_drain_end(BlockDriverState *bs)
1834da668aa1SThomas Huth {
1835da668aa1SThomas Huth BDRVReplaceTestState *s = bs->opaque;
1836da668aa1SThomas Huth
1837004915a9SKevin Wolf GRAPH_RDLOCK_GUARD_MAINLOOP();
1838004915a9SKevin Wolf
183923987471SKevin Wolf if (!s->setup_completed) {
184023987471SKevin Wolf return;
184123987471SKevin Wolf }
184223987471SKevin Wolf
1843da668aa1SThomas Huth g_assert(s->drain_count > 0);
1844da668aa1SThomas Huth if (!--s->drain_count) {
1845da668aa1SThomas Huth s->was_undrained = true;
1846da668aa1SThomas Huth
1847da668aa1SThomas Huth if (bs->backing) {
18487bce1c29SKevin Wolf Coroutine *co = qemu_coroutine_create(bdrv_replace_test_read_entry,
18497bce1c29SKevin Wolf bs);
18507bce1c29SKevin Wolf bdrv_inc_in_flight(bs);
18517bce1c29SKevin Wolf aio_co_enter(bdrv_get_aio_context(bs), co);
1852da668aa1SThomas Huth }
1853da668aa1SThomas Huth }
1854da668aa1SThomas Huth }
1855da668aa1SThomas Huth
1856da668aa1SThomas Huth static BlockDriver bdrv_replace_test = {
1857da668aa1SThomas Huth .format_name = "replace_test",
1858da668aa1SThomas Huth .instance_size = sizeof(BDRVReplaceTestState),
18599ebfc111SVladimir Sementsov-Ogievskiy .supports_backing = true,
1860da668aa1SThomas Huth
1861da668aa1SThomas Huth .bdrv_close = bdrv_replace_test_close,
1862da668aa1SThomas Huth .bdrv_co_preadv = bdrv_replace_test_co_preadv,
1863da668aa1SThomas Huth
18645e8ac217SKevin Wolf .bdrv_drain_begin = bdrv_replace_test_drain_begin,
18655e8ac217SKevin Wolf .bdrv_drain_end = bdrv_replace_test_drain_end,
1866da668aa1SThomas Huth
1867da668aa1SThomas Huth .bdrv_child_perm = bdrv_default_perms,
1868da668aa1SThomas Huth };
1869da668aa1SThomas Huth
test_replace_child_mid_drain_read_co(void * opaque)1870da668aa1SThomas Huth static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque)
1871da668aa1SThomas Huth {
1872da668aa1SThomas Huth int ret;
1873da668aa1SThomas Huth char data;
1874da668aa1SThomas Huth
1875da668aa1SThomas Huth ret = blk_co_pread(opaque, 0, 1, &data, 0);
1876da668aa1SThomas Huth g_assert(ret >= 0);
1877da668aa1SThomas Huth }
1878da668aa1SThomas Huth
1879da668aa1SThomas Huth /**
1880da668aa1SThomas Huth * We test two things:
1881da668aa1SThomas Huth * (1) bdrv_replace_child_noperm() must not undrain the parent if both
1882da668aa1SThomas Huth * children are drained.
1883da668aa1SThomas Huth * (2) bdrv_replace_child_noperm() must never flush I/O requests to a
1884da668aa1SThomas Huth * drained child. If the old child is drained, it must flush I/O
1885da668aa1SThomas Huth * requests after the new one has been attached. If the new child
1886da668aa1SThomas Huth * is drained, it must flush I/O requests before the old one is
1887da668aa1SThomas Huth * detached.
1888da668aa1SThomas Huth *
1889da668aa1SThomas Huth * To do so, we create one parent node and two child nodes; then
1890da668aa1SThomas Huth * attach one of the children (old_child_bs) to the parent, then
1891da668aa1SThomas Huth * drain both old_child_bs and new_child_bs according to
1892da668aa1SThomas Huth * old_drain_count and new_drain_count, respectively, and finally
1893da668aa1SThomas Huth * we invoke bdrv_replace_node() to replace old_child_bs by
1894da668aa1SThomas Huth * new_child_bs.
1895da668aa1SThomas Huth *
1896da668aa1SThomas Huth * The test block driver we use here (bdrv_replace_test) has a read
1897da668aa1SThomas Huth * function that:
1898da668aa1SThomas Huth * - For the parent node, can optionally yield, and then forwards the
1899da668aa1SThomas Huth * read to bdrv_preadv(),
1900da668aa1SThomas Huth * - For the child node, just returns immediately.
1901da668aa1SThomas Huth *
1902da668aa1SThomas Huth * If the read yields, the drain_begin function will wake it up.
1903da668aa1SThomas Huth *
1904da668aa1SThomas Huth * The drain_end function issues a read on the parent once it is fully
1905da668aa1SThomas Huth * undrained (which simulates requests starting to come in again).
1906da668aa1SThomas Huth */
do_test_replace_child_mid_drain(int old_drain_count,int new_drain_count)1907da668aa1SThomas Huth static void do_test_replace_child_mid_drain(int old_drain_count,
1908da668aa1SThomas Huth int new_drain_count)
1909da668aa1SThomas Huth {
1910da668aa1SThomas Huth BlockBackend *parent_blk;
1911da668aa1SThomas Huth BlockDriverState *parent_bs;
1912da668aa1SThomas Huth BlockDriverState *old_child_bs, *new_child_bs;
1913da668aa1SThomas Huth BDRVReplaceTestState *parent_s;
1914da668aa1SThomas Huth BDRVReplaceTestState *old_child_s, *new_child_s;
1915da668aa1SThomas Huth Coroutine *io_co;
1916da668aa1SThomas Huth int i;
1917da668aa1SThomas Huth
1918da668aa1SThomas Huth parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0,
1919da668aa1SThomas Huth &error_abort);
1920da668aa1SThomas Huth parent_s = parent_bs->opaque;
1921da668aa1SThomas Huth
1922da668aa1SThomas Huth parent_blk = blk_new(qemu_get_aio_context(),
1923da668aa1SThomas Huth BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
1924da668aa1SThomas Huth blk_insert_bs(parent_blk, parent_bs, &error_abort);
1925da668aa1SThomas Huth
1926da668aa1SThomas Huth old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0,
1927da668aa1SThomas Huth &error_abort);
1928da668aa1SThomas Huth new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0,
1929da668aa1SThomas Huth &error_abort);
1930da668aa1SThomas Huth old_child_s = old_child_bs->opaque;
1931da668aa1SThomas Huth new_child_s = new_child_bs->opaque;
1932da668aa1SThomas Huth
1933da668aa1SThomas Huth /* So that we can read something */
1934da668aa1SThomas Huth parent_bs->total_sectors = 1;
1935da668aa1SThomas Huth old_child_bs->total_sectors = 1;
1936da668aa1SThomas Huth new_child_bs->total_sectors = 1;
1937da668aa1SThomas Huth
1938da668aa1SThomas Huth bdrv_ref(old_child_bs);
19396bc30f19SStefan Hajnoczi bdrv_graph_wrlock();
19405bb04747SVladimir Sementsov-Ogievskiy bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
19415bb04747SVladimir Sementsov-Ogievskiy BDRV_CHILD_COW, &error_abort);
19426bc30f19SStefan Hajnoczi bdrv_graph_wrunlock();
194323987471SKevin Wolf parent_s->setup_completed = true;
1944da668aa1SThomas Huth
1945da668aa1SThomas Huth for (i = 0; i < old_drain_count; i++) {
1946da668aa1SThomas Huth bdrv_drained_begin(old_child_bs);
1947da668aa1SThomas Huth }
1948da668aa1SThomas Huth for (i = 0; i < new_drain_count; i++) {
1949da668aa1SThomas Huth bdrv_drained_begin(new_child_bs);
1950da668aa1SThomas Huth }
1951da668aa1SThomas Huth
1952da668aa1SThomas Huth if (!old_drain_count) {
1953da668aa1SThomas Huth /*
1954da668aa1SThomas Huth * Start a read operation that will yield, so it will not
1955da668aa1SThomas Huth * complete before the node is drained.
1956da668aa1SThomas Huth */
1957da668aa1SThomas Huth parent_s->yield_before_read = true;
1958da668aa1SThomas Huth io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co,
1959da668aa1SThomas Huth parent_blk);
1960da668aa1SThomas Huth qemu_coroutine_enter(io_co);
1961da668aa1SThomas Huth }
1962da668aa1SThomas Huth
1963da668aa1SThomas Huth /* If we have started a read operation, it should have yielded */
1964da668aa1SThomas Huth g_assert(!parent_s->has_read);
1965da668aa1SThomas Huth
1966da668aa1SThomas Huth /* Reset drained status so we can see what bdrv_replace_node() does */
1967da668aa1SThomas Huth parent_s->was_drained = false;
1968da668aa1SThomas Huth parent_s->was_undrained = false;
1969da668aa1SThomas Huth
1970da668aa1SThomas Huth g_assert(parent_bs->quiesce_counter == old_drain_count);
1971ccd6a379SKevin Wolf bdrv_drained_begin(old_child_bs);
1972ccd6a379SKevin Wolf bdrv_drained_begin(new_child_bs);
19736bc30f19SStefan Hajnoczi bdrv_graph_wrlock();
1974da668aa1SThomas Huth bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
19756bc30f19SStefan Hajnoczi bdrv_graph_wrunlock();
1976ccd6a379SKevin Wolf bdrv_drained_end(new_child_bs);
1977ccd6a379SKevin Wolf bdrv_drained_end(old_child_bs);
1978da668aa1SThomas Huth g_assert(parent_bs->quiesce_counter == new_drain_count);
1979da668aa1SThomas Huth
1980da668aa1SThomas Huth if (!old_drain_count && !new_drain_count) {
1981da668aa1SThomas Huth /*
1982da668aa1SThomas Huth * From undrained to undrained drains and undrains the parent,
1983da668aa1SThomas Huth * because bdrv_replace_node() contains a drained section for
1984da668aa1SThomas Huth * @old_child_bs.
1985da668aa1SThomas Huth */
1986da668aa1SThomas Huth g_assert(parent_s->was_drained && parent_s->was_undrained);
1987da668aa1SThomas Huth } else if (!old_drain_count && new_drain_count) {
1988da668aa1SThomas Huth /*
1989da668aa1SThomas Huth * From undrained to drained should drain the parent and keep
1990da668aa1SThomas Huth * it that way.
1991da668aa1SThomas Huth */
1992da668aa1SThomas Huth g_assert(parent_s->was_drained && !parent_s->was_undrained);
1993da668aa1SThomas Huth } else if (old_drain_count && !new_drain_count) {
1994da668aa1SThomas Huth /*
1995da668aa1SThomas Huth * From drained to undrained should undrain the parent and
1996da668aa1SThomas Huth * keep it that way.
1997da668aa1SThomas Huth */
1998da668aa1SThomas Huth g_assert(!parent_s->was_drained && parent_s->was_undrained);
1999da668aa1SThomas Huth } else /* if (old_drain_count && new_drain_count) */ {
2000da668aa1SThomas Huth /*
2001da668aa1SThomas Huth * From drained to drained must not undrain the parent at any
2002da668aa1SThomas Huth * point
2003da668aa1SThomas Huth */
2004da668aa1SThomas Huth g_assert(!parent_s->was_drained && !parent_s->was_undrained);
2005da668aa1SThomas Huth }
2006da668aa1SThomas Huth
2007da668aa1SThomas Huth if (!old_drain_count || !new_drain_count) {
2008da668aa1SThomas Huth /*
2009da668aa1SThomas Huth * If !old_drain_count, we have started a read request before
2010da668aa1SThomas Huth * bdrv_replace_node(). If !new_drain_count, the parent must
2011da668aa1SThomas Huth * have been undrained at some point, and
2012da668aa1SThomas Huth * bdrv_replace_test_co_drain_end() starts a read request
2013da668aa1SThomas Huth * then.
2014da668aa1SThomas Huth */
2015da668aa1SThomas Huth g_assert(parent_s->has_read);
2016da668aa1SThomas Huth } else {
2017da668aa1SThomas Huth /*
2018da668aa1SThomas Huth * If the parent was never undrained, there is no way to start
2019da668aa1SThomas Huth * a read request.
2020da668aa1SThomas Huth */
2021da668aa1SThomas Huth g_assert(!parent_s->has_read);
2022da668aa1SThomas Huth }
2023da668aa1SThomas Huth
2024da668aa1SThomas Huth /* A drained child must have not received any request */
2025da668aa1SThomas Huth g_assert(!(old_drain_count && old_child_s->has_read));
2026da668aa1SThomas Huth g_assert(!(new_drain_count && new_child_s->has_read));
2027da668aa1SThomas Huth
2028da668aa1SThomas Huth for (i = 0; i < new_drain_count; i++) {
2029da668aa1SThomas Huth bdrv_drained_end(new_child_bs);
2030da668aa1SThomas Huth }
2031da668aa1SThomas Huth for (i = 0; i < old_drain_count; i++) {
2032da668aa1SThomas Huth bdrv_drained_end(old_child_bs);
2033da668aa1SThomas Huth }
2034da668aa1SThomas Huth
2035da668aa1SThomas Huth /*
2036da668aa1SThomas Huth * By now, bdrv_replace_test_co_drain_end() must have been called
2037da668aa1SThomas Huth * at some point while the new child was attached to the parent.
2038da668aa1SThomas Huth */
2039da668aa1SThomas Huth g_assert(parent_s->has_read);
2040da668aa1SThomas Huth g_assert(new_child_s->has_read);
2041da668aa1SThomas Huth
2042da668aa1SThomas Huth blk_unref(parent_blk);
2043da668aa1SThomas Huth bdrv_unref(parent_bs);
2044da668aa1SThomas Huth bdrv_unref(old_child_bs);
2045da668aa1SThomas Huth bdrv_unref(new_child_bs);
2046da668aa1SThomas Huth }
2047da668aa1SThomas Huth
/*
 * Run do_test_replace_child_mid_drain() for every combination of the
 * old and new child being undrained (0) or drained once (1).
 */
static void test_replace_child_mid_drain(void)
{
    int old_count, new_count;

    for (old_count = 0; old_count < 2; old_count++) {
        for (new_count = 0; new_count < 2; new_count++) {
            do_test_replace_child_mid_drain(old_count, new_count);
        }
    }
}
2058da668aa1SThomas Huth
main(int argc,char ** argv)2059da668aa1SThomas Huth int main(int argc, char **argv)
2060da668aa1SThomas Huth {
2061da668aa1SThomas Huth int ret;
2062da668aa1SThomas Huth
2063da668aa1SThomas Huth bdrv_init();
2064da668aa1SThomas Huth qemu_init_main_loop(&error_abort);
2065da668aa1SThomas Huth
2066da668aa1SThomas Huth g_test_init(&argc, &argv, NULL);
2067da668aa1SThomas Huth qemu_event_init(&done_event, false);
2068da668aa1SThomas Huth
2069da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
2070da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
2071da668aa1SThomas Huth
2072da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
2073da668aa1SThomas Huth test_drv_cb_co_drain_all);
2074da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
2075da668aa1SThomas Huth
2076da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
2077da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
2078da668aa1SThomas Huth
2079da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
2080da668aa1SThomas Huth test_quiesce_co_drain_all);
2081da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
2082da668aa1SThomas Huth
2083da668aa1SThomas Huth g_test_add_func("/bdrv-drain/nested", test_nested);
2084da668aa1SThomas Huth
2085da668aa1SThomas Huth g_test_add_func("/bdrv-drain/graph-change/drain_all",
2086da668aa1SThomas Huth test_graph_change_drain_all);
2087da668aa1SThomas Huth
2088da668aa1SThomas Huth g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
2089da668aa1SThomas Huth g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
2090da668aa1SThomas Huth
2091da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
2092da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
2093da668aa1SThomas Huth
2094da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
2095da668aa1SThomas Huth test_blockjob_error_drain_all);
2096da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/error/drain",
2097da668aa1SThomas Huth test_blockjob_error_drain);
2098da668aa1SThomas Huth
2099da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
2100da668aa1SThomas Huth test_blockjob_iothread_drain_all);
2101da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
2102da668aa1SThomas Huth test_blockjob_iothread_drain);
2103da668aa1SThomas Huth
2104da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
2105da668aa1SThomas Huth test_blockjob_iothread_error_drain_all);
2106da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
2107da668aa1SThomas Huth test_blockjob_iothread_error_drain);
2108da668aa1SThomas Huth
2109da668aa1SThomas Huth g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
2110da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
2111da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
2112da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
2113da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);
2114da668aa1SThomas Huth
2115da668aa1SThomas Huth g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained);
2116da668aa1SThomas Huth
2117da668aa1SThomas Huth g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context);
2118da668aa1SThomas Huth
2119da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end",
2120da668aa1SThomas Huth test_blockjob_commit_by_drained_end);
2121da668aa1SThomas Huth
2122da668aa1SThomas Huth g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll",
2123da668aa1SThomas Huth test_drop_intermediate_poll);
2124da668aa1SThomas Huth
2125da668aa1SThomas Huth g_test_add_func("/bdrv-drain/replace_child/mid-drain",
2126da668aa1SThomas Huth test_replace_child_mid_drain);
2127da668aa1SThomas Huth
2128da668aa1SThomas Huth ret = g_test_run();
2129da668aa1SThomas Huth qemu_event_destroy(&done_event);
2130da668aa1SThomas Huth return ret;
2131da668aa1SThomas Huth }
2132