xref: /openbmc/qemu/tests/unit/test-bdrv-drain.c (revision da668aa15b99150a8595c491aee00d5d2426aaf9)
1*da668aa1SThomas Huth /*
2*da668aa1SThomas Huth  * Block node draining tests
3*da668aa1SThomas Huth  *
4*da668aa1SThomas Huth  * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
5*da668aa1SThomas Huth  *
6*da668aa1SThomas Huth  * Permission is hereby granted, free of charge, to any person obtaining a copy
7*da668aa1SThomas Huth  * of this software and associated documentation files (the "Software"), to deal
8*da668aa1SThomas Huth  * in the Software without restriction, including without limitation the rights
9*da668aa1SThomas Huth  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10*da668aa1SThomas Huth  * copies of the Software, and to permit persons to whom the Software is
11*da668aa1SThomas Huth  * furnished to do so, subject to the following conditions:
12*da668aa1SThomas Huth  *
13*da668aa1SThomas Huth  * The above copyright notice and this permission notice shall be included in
14*da668aa1SThomas Huth  * all copies or substantial portions of the Software.
15*da668aa1SThomas Huth  *
16*da668aa1SThomas Huth  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17*da668aa1SThomas Huth  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18*da668aa1SThomas Huth  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19*da668aa1SThomas Huth  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20*da668aa1SThomas Huth  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21*da668aa1SThomas Huth  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22*da668aa1SThomas Huth  * THE SOFTWARE.
23*da668aa1SThomas Huth  */
24*da668aa1SThomas Huth 
25*da668aa1SThomas Huth #include "qemu/osdep.h"
26*da668aa1SThomas Huth #include "block/block.h"
27*da668aa1SThomas Huth #include "block/blockjob_int.h"
28*da668aa1SThomas Huth #include "sysemu/block-backend.h"
29*da668aa1SThomas Huth #include "qapi/error.h"
30*da668aa1SThomas Huth #include "qemu/main-loop.h"
31*da668aa1SThomas Huth #include "iothread.h"
32*da668aa1SThomas Huth 
33*da668aa1SThomas Huth static QemuEvent done_event;
34*da668aa1SThomas Huth 
35*da668aa1SThomas Huth typedef struct BDRVTestState {
36*da668aa1SThomas Huth     int drain_count;
37*da668aa1SThomas Huth     AioContext *bh_indirection_ctx;
38*da668aa1SThomas Huth     bool sleep_in_drain_begin;
39*da668aa1SThomas Huth } BDRVTestState;
40*da668aa1SThomas Huth 
41*da668aa1SThomas Huth static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
42*da668aa1SThomas Huth {
43*da668aa1SThomas Huth     BDRVTestState *s = bs->opaque;
44*da668aa1SThomas Huth     s->drain_count++;
45*da668aa1SThomas Huth     if (s->sleep_in_drain_begin) {
46*da668aa1SThomas Huth         qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
47*da668aa1SThomas Huth     }
48*da668aa1SThomas Huth }
49*da668aa1SThomas Huth 
50*da668aa1SThomas Huth static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
51*da668aa1SThomas Huth {
52*da668aa1SThomas Huth     BDRVTestState *s = bs->opaque;
53*da668aa1SThomas Huth     s->drain_count--;
54*da668aa1SThomas Huth }
55*da668aa1SThomas Huth 
56*da668aa1SThomas Huth static void bdrv_test_close(BlockDriverState *bs)
57*da668aa1SThomas Huth {
58*da668aa1SThomas Huth     BDRVTestState *s = bs->opaque;
59*da668aa1SThomas Huth     g_assert_cmpint(s->drain_count, >, 0);
60*da668aa1SThomas Huth }
61*da668aa1SThomas Huth 
62*da668aa1SThomas Huth static void co_reenter_bh(void *opaque)
63*da668aa1SThomas Huth {
64*da668aa1SThomas Huth     aio_co_wake(opaque);
65*da668aa1SThomas Huth }
66*da668aa1SThomas Huth 
67*da668aa1SThomas Huth static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
68*da668aa1SThomas Huth                                             uint64_t offset, uint64_t bytes,
69*da668aa1SThomas Huth                                             QEMUIOVector *qiov, int flags)
70*da668aa1SThomas Huth {
71*da668aa1SThomas Huth     BDRVTestState *s = bs->opaque;
72*da668aa1SThomas Huth 
73*da668aa1SThomas Huth     /* We want this request to stay until the polling loop in drain waits for
74*da668aa1SThomas Huth      * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
75*da668aa1SThomas Huth      * first and polls its result, too, but it shouldn't accidentally complete
76*da668aa1SThomas Huth      * this request yet. */
77*da668aa1SThomas Huth     qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
78*da668aa1SThomas Huth 
79*da668aa1SThomas Huth     if (s->bh_indirection_ctx) {
80*da668aa1SThomas Huth         aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
81*da668aa1SThomas Huth                                 qemu_coroutine_self());
82*da668aa1SThomas Huth         qemu_coroutine_yield();
83*da668aa1SThomas Huth     }
84*da668aa1SThomas Huth 
85*da668aa1SThomas Huth     return 0;
86*da668aa1SThomas Huth }
87*da668aa1SThomas Huth 
88*da668aa1SThomas Huth static int bdrv_test_change_backing_file(BlockDriverState *bs,
89*da668aa1SThomas Huth                                          const char *backing_file,
90*da668aa1SThomas Huth                                          const char *backing_fmt)
91*da668aa1SThomas Huth {
92*da668aa1SThomas Huth     return 0;
93*da668aa1SThomas Huth }
94*da668aa1SThomas Huth 
95*da668aa1SThomas Huth static BlockDriver bdrv_test = {
96*da668aa1SThomas Huth     .format_name            = "test",
97*da668aa1SThomas Huth     .instance_size          = sizeof(BDRVTestState),
98*da668aa1SThomas Huth 
99*da668aa1SThomas Huth     .bdrv_close             = bdrv_test_close,
100*da668aa1SThomas Huth     .bdrv_co_preadv         = bdrv_test_co_preadv,
101*da668aa1SThomas Huth 
102*da668aa1SThomas Huth     .bdrv_co_drain_begin    = bdrv_test_co_drain_begin,
103*da668aa1SThomas Huth     .bdrv_co_drain_end      = bdrv_test_co_drain_end,
104*da668aa1SThomas Huth 
105*da668aa1SThomas Huth     .bdrv_child_perm        = bdrv_default_perms,
106*da668aa1SThomas Huth 
107*da668aa1SThomas Huth     .bdrv_change_backing_file = bdrv_test_change_backing_file,
108*da668aa1SThomas Huth };
109*da668aa1SThomas Huth 
/* AIO completion callback that stores the result in *(int *)opaque */
static void aio_ret_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;

    *aio_ret = ret;
}
115*da668aa1SThomas Huth 
116*da668aa1SThomas Huth typedef struct CallInCoroutineData {
117*da668aa1SThomas Huth     void (*entry)(void);
118*da668aa1SThomas Huth     bool done;
119*da668aa1SThomas Huth } CallInCoroutineData;
120*da668aa1SThomas Huth 
121*da668aa1SThomas Huth static coroutine_fn void call_in_coroutine_entry(void *opaque)
122*da668aa1SThomas Huth {
123*da668aa1SThomas Huth     CallInCoroutineData *data = opaque;
124*da668aa1SThomas Huth 
125*da668aa1SThomas Huth     data->entry();
126*da668aa1SThomas Huth     data->done = true;
127*da668aa1SThomas Huth }
128*da668aa1SThomas Huth 
129*da668aa1SThomas Huth static void call_in_coroutine(void (*entry)(void))
130*da668aa1SThomas Huth {
131*da668aa1SThomas Huth     Coroutine *co;
132*da668aa1SThomas Huth     CallInCoroutineData data = {
133*da668aa1SThomas Huth         .entry  = entry,
134*da668aa1SThomas Huth         .done   = false,
135*da668aa1SThomas Huth     };
136*da668aa1SThomas Huth 
137*da668aa1SThomas Huth     co = qemu_coroutine_create(call_in_coroutine_entry, &data);
138*da668aa1SThomas Huth     qemu_coroutine_enter(co);
139*da668aa1SThomas Huth     while (!data.done) {
140*da668aa1SThomas Huth         aio_poll(qemu_get_aio_context(), true);
141*da668aa1SThomas Huth     }
142*da668aa1SThomas Huth }
143*da668aa1SThomas Huth 
/* The drain variants exercised by the tests below */
enum drain_type {
    BDRV_DRAIN_ALL,         /* bdrv_drain_all_begin()/end() */
    BDRV_DRAIN,             /* bdrv_drained_begin()/end() */
    BDRV_SUBTREE_DRAIN,     /* bdrv_subtree_drained_begin()/end() */
    DRAIN_TYPE_MAX,
};
150*da668aa1SThomas Huth 
151*da668aa1SThomas Huth static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
152*da668aa1SThomas Huth {
153*da668aa1SThomas Huth     switch (drain_type) {
154*da668aa1SThomas Huth     case BDRV_DRAIN_ALL:        bdrv_drain_all_begin(); break;
155*da668aa1SThomas Huth     case BDRV_DRAIN:            bdrv_drained_begin(bs); break;
156*da668aa1SThomas Huth     case BDRV_SUBTREE_DRAIN:    bdrv_subtree_drained_begin(bs); break;
157*da668aa1SThomas Huth     default:                    g_assert_not_reached();
158*da668aa1SThomas Huth     }
159*da668aa1SThomas Huth }
160*da668aa1SThomas Huth 
161*da668aa1SThomas Huth static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
162*da668aa1SThomas Huth {
163*da668aa1SThomas Huth     switch (drain_type) {
164*da668aa1SThomas Huth     case BDRV_DRAIN_ALL:        bdrv_drain_all_end(); break;
165*da668aa1SThomas Huth     case BDRV_DRAIN:            bdrv_drained_end(bs); break;
166*da668aa1SThomas Huth     case BDRV_SUBTREE_DRAIN:    bdrv_subtree_drained_end(bs); break;
167*da668aa1SThomas Huth     default:                    g_assert_not_reached();
168*da668aa1SThomas Huth     }
169*da668aa1SThomas Huth }
170*da668aa1SThomas Huth 
171*da668aa1SThomas Huth static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
172*da668aa1SThomas Huth {
173*da668aa1SThomas Huth     if (drain_type != BDRV_DRAIN_ALL) {
174*da668aa1SThomas Huth         aio_context_acquire(bdrv_get_aio_context(bs));
175*da668aa1SThomas Huth     }
176*da668aa1SThomas Huth     do_drain_begin(drain_type, bs);
177*da668aa1SThomas Huth     if (drain_type != BDRV_DRAIN_ALL) {
178*da668aa1SThomas Huth         aio_context_release(bdrv_get_aio_context(bs));
179*da668aa1SThomas Huth     }
180*da668aa1SThomas Huth }
181*da668aa1SThomas Huth 
182*da668aa1SThomas Huth static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
183*da668aa1SThomas Huth {
184*da668aa1SThomas Huth     if (drain_type != BDRV_DRAIN_ALL) {
185*da668aa1SThomas Huth         aio_context_acquire(bdrv_get_aio_context(bs));
186*da668aa1SThomas Huth     }
187*da668aa1SThomas Huth     do_drain_end(drain_type, bs);
188*da668aa1SThomas Huth     if (drain_type != BDRV_DRAIN_ALL) {
189*da668aa1SThomas Huth         aio_context_release(bdrv_get_aio_context(bs));
190*da668aa1SThomas Huth     }
191*da668aa1SThomas Huth }
192*da668aa1SThomas Huth 
193*da668aa1SThomas Huth static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
194*da668aa1SThomas Huth {
195*da668aa1SThomas Huth     BlockBackend *blk;
196*da668aa1SThomas Huth     BlockDriverState *bs, *backing;
197*da668aa1SThomas Huth     BDRVTestState *s, *backing_s;
198*da668aa1SThomas Huth     BlockAIOCB *acb;
199*da668aa1SThomas Huth     int aio_ret;
200*da668aa1SThomas Huth 
201*da668aa1SThomas Huth     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
202*da668aa1SThomas Huth 
203*da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
204*da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
205*da668aa1SThomas Huth                               &error_abort);
206*da668aa1SThomas Huth     s = bs->opaque;
207*da668aa1SThomas Huth     blk_insert_bs(blk, bs, &error_abort);
208*da668aa1SThomas Huth 
209*da668aa1SThomas Huth     backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
210*da668aa1SThomas Huth     backing_s = backing->opaque;
211*da668aa1SThomas Huth     bdrv_set_backing_hd(bs, backing, &error_abort);
212*da668aa1SThomas Huth 
213*da668aa1SThomas Huth     /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
214*da668aa1SThomas Huth     g_assert_cmpint(s->drain_count, ==, 0);
215*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
216*da668aa1SThomas Huth 
217*da668aa1SThomas Huth     do_drain_begin(drain_type, bs);
218*da668aa1SThomas Huth 
219*da668aa1SThomas Huth     g_assert_cmpint(s->drain_count, ==, 1);
220*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
221*da668aa1SThomas Huth 
222*da668aa1SThomas Huth     do_drain_end(drain_type, bs);
223*da668aa1SThomas Huth 
224*da668aa1SThomas Huth     g_assert_cmpint(s->drain_count, ==, 0);
225*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
226*da668aa1SThomas Huth 
227*da668aa1SThomas Huth     /* Now do the same while a request is pending */
228*da668aa1SThomas Huth     aio_ret = -EINPROGRESS;
229*da668aa1SThomas Huth     acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
230*da668aa1SThomas Huth     g_assert(acb != NULL);
231*da668aa1SThomas Huth     g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
232*da668aa1SThomas Huth 
233*da668aa1SThomas Huth     g_assert_cmpint(s->drain_count, ==, 0);
234*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
235*da668aa1SThomas Huth 
236*da668aa1SThomas Huth     do_drain_begin(drain_type, bs);
237*da668aa1SThomas Huth 
238*da668aa1SThomas Huth     g_assert_cmpint(aio_ret, ==, 0);
239*da668aa1SThomas Huth     g_assert_cmpint(s->drain_count, ==, 1);
240*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
241*da668aa1SThomas Huth 
242*da668aa1SThomas Huth     do_drain_end(drain_type, bs);
243*da668aa1SThomas Huth 
244*da668aa1SThomas Huth     g_assert_cmpint(s->drain_count, ==, 0);
245*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
246*da668aa1SThomas Huth 
247*da668aa1SThomas Huth     bdrv_unref(backing);
248*da668aa1SThomas Huth     bdrv_unref(bs);
249*da668aa1SThomas Huth     blk_unref(blk);
250*da668aa1SThomas Huth }
251*da668aa1SThomas Huth 
252*da668aa1SThomas Huth static void test_drv_cb_drain_all(void)
253*da668aa1SThomas Huth {
254*da668aa1SThomas Huth     test_drv_cb_common(BDRV_DRAIN_ALL, true);
255*da668aa1SThomas Huth }
256*da668aa1SThomas Huth 
257*da668aa1SThomas Huth static void test_drv_cb_drain(void)
258*da668aa1SThomas Huth {
259*da668aa1SThomas Huth     test_drv_cb_common(BDRV_DRAIN, false);
260*da668aa1SThomas Huth }
261*da668aa1SThomas Huth 
262*da668aa1SThomas Huth static void test_drv_cb_drain_subtree(void)
263*da668aa1SThomas Huth {
264*da668aa1SThomas Huth     test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
265*da668aa1SThomas Huth }
266*da668aa1SThomas Huth 
267*da668aa1SThomas Huth static void test_drv_cb_co_drain_all(void)
268*da668aa1SThomas Huth {
269*da668aa1SThomas Huth     call_in_coroutine(test_drv_cb_drain_all);
270*da668aa1SThomas Huth }
271*da668aa1SThomas Huth 
272*da668aa1SThomas Huth static void test_drv_cb_co_drain(void)
273*da668aa1SThomas Huth {
274*da668aa1SThomas Huth     call_in_coroutine(test_drv_cb_drain);
275*da668aa1SThomas Huth }
276*da668aa1SThomas Huth 
277*da668aa1SThomas Huth static void test_drv_cb_co_drain_subtree(void)
278*da668aa1SThomas Huth {
279*da668aa1SThomas Huth     call_in_coroutine(test_drv_cb_drain_subtree);
280*da668aa1SThomas Huth }
281*da668aa1SThomas Huth 
282*da668aa1SThomas Huth static void test_quiesce_common(enum drain_type drain_type, bool recursive)
283*da668aa1SThomas Huth {
284*da668aa1SThomas Huth     BlockBackend *blk;
285*da668aa1SThomas Huth     BlockDriverState *bs, *backing;
286*da668aa1SThomas Huth 
287*da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
288*da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
289*da668aa1SThomas Huth                               &error_abort);
290*da668aa1SThomas Huth     blk_insert_bs(blk, bs, &error_abort);
291*da668aa1SThomas Huth 
292*da668aa1SThomas Huth     backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
293*da668aa1SThomas Huth     bdrv_set_backing_hd(bs, backing, &error_abort);
294*da668aa1SThomas Huth 
295*da668aa1SThomas Huth     g_assert_cmpint(bs->quiesce_counter, ==, 0);
296*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 0);
297*da668aa1SThomas Huth 
298*da668aa1SThomas Huth     do_drain_begin(drain_type, bs);
299*da668aa1SThomas Huth 
300*da668aa1SThomas Huth     g_assert_cmpint(bs->quiesce_counter, ==, 1);
301*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);
302*da668aa1SThomas Huth 
303*da668aa1SThomas Huth     do_drain_end(drain_type, bs);
304*da668aa1SThomas Huth 
305*da668aa1SThomas Huth     g_assert_cmpint(bs->quiesce_counter, ==, 0);
306*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 0);
307*da668aa1SThomas Huth 
308*da668aa1SThomas Huth     bdrv_unref(backing);
309*da668aa1SThomas Huth     bdrv_unref(bs);
310*da668aa1SThomas Huth     blk_unref(blk);
311*da668aa1SThomas Huth }
312*da668aa1SThomas Huth 
313*da668aa1SThomas Huth static void test_quiesce_drain_all(void)
314*da668aa1SThomas Huth {
315*da668aa1SThomas Huth     test_quiesce_common(BDRV_DRAIN_ALL, true);
316*da668aa1SThomas Huth }
317*da668aa1SThomas Huth 
318*da668aa1SThomas Huth static void test_quiesce_drain(void)
319*da668aa1SThomas Huth {
320*da668aa1SThomas Huth     test_quiesce_common(BDRV_DRAIN, false);
321*da668aa1SThomas Huth }
322*da668aa1SThomas Huth 
323*da668aa1SThomas Huth static void test_quiesce_drain_subtree(void)
324*da668aa1SThomas Huth {
325*da668aa1SThomas Huth     test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
326*da668aa1SThomas Huth }
327*da668aa1SThomas Huth 
328*da668aa1SThomas Huth static void test_quiesce_co_drain_all(void)
329*da668aa1SThomas Huth {
330*da668aa1SThomas Huth     call_in_coroutine(test_quiesce_drain_all);
331*da668aa1SThomas Huth }
332*da668aa1SThomas Huth 
333*da668aa1SThomas Huth static void test_quiesce_co_drain(void)
334*da668aa1SThomas Huth {
335*da668aa1SThomas Huth     call_in_coroutine(test_quiesce_drain);
336*da668aa1SThomas Huth }
337*da668aa1SThomas Huth 
338*da668aa1SThomas Huth static void test_quiesce_co_drain_subtree(void)
339*da668aa1SThomas Huth {
340*da668aa1SThomas Huth     call_in_coroutine(test_quiesce_drain_subtree);
341*da668aa1SThomas Huth }
342*da668aa1SThomas Huth 
343*da668aa1SThomas Huth static void test_nested(void)
344*da668aa1SThomas Huth {
345*da668aa1SThomas Huth     BlockBackend *blk;
346*da668aa1SThomas Huth     BlockDriverState *bs, *backing;
347*da668aa1SThomas Huth     BDRVTestState *s, *backing_s;
348*da668aa1SThomas Huth     enum drain_type outer, inner;
349*da668aa1SThomas Huth 
350*da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
351*da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
352*da668aa1SThomas Huth                               &error_abort);
353*da668aa1SThomas Huth     s = bs->opaque;
354*da668aa1SThomas Huth     blk_insert_bs(blk, bs, &error_abort);
355*da668aa1SThomas Huth 
356*da668aa1SThomas Huth     backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
357*da668aa1SThomas Huth     backing_s = backing->opaque;
358*da668aa1SThomas Huth     bdrv_set_backing_hd(bs, backing, &error_abort);
359*da668aa1SThomas Huth 
360*da668aa1SThomas Huth     for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
361*da668aa1SThomas Huth         for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
362*da668aa1SThomas Huth             int backing_quiesce = (outer != BDRV_DRAIN) +
363*da668aa1SThomas Huth                                   (inner != BDRV_DRAIN);
364*da668aa1SThomas Huth 
365*da668aa1SThomas Huth             g_assert_cmpint(bs->quiesce_counter, ==, 0);
366*da668aa1SThomas Huth             g_assert_cmpint(backing->quiesce_counter, ==, 0);
367*da668aa1SThomas Huth             g_assert_cmpint(s->drain_count, ==, 0);
368*da668aa1SThomas Huth             g_assert_cmpint(backing_s->drain_count, ==, 0);
369*da668aa1SThomas Huth 
370*da668aa1SThomas Huth             do_drain_begin(outer, bs);
371*da668aa1SThomas Huth             do_drain_begin(inner, bs);
372*da668aa1SThomas Huth 
373*da668aa1SThomas Huth             g_assert_cmpint(bs->quiesce_counter, ==, 2);
374*da668aa1SThomas Huth             g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
375*da668aa1SThomas Huth             g_assert_cmpint(s->drain_count, ==, 2);
376*da668aa1SThomas Huth             g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce);
377*da668aa1SThomas Huth 
378*da668aa1SThomas Huth             do_drain_end(inner, bs);
379*da668aa1SThomas Huth             do_drain_end(outer, bs);
380*da668aa1SThomas Huth 
381*da668aa1SThomas Huth             g_assert_cmpint(bs->quiesce_counter, ==, 0);
382*da668aa1SThomas Huth             g_assert_cmpint(backing->quiesce_counter, ==, 0);
383*da668aa1SThomas Huth             g_assert_cmpint(s->drain_count, ==, 0);
384*da668aa1SThomas Huth             g_assert_cmpint(backing_s->drain_count, ==, 0);
385*da668aa1SThomas Huth         }
386*da668aa1SThomas Huth     }
387*da668aa1SThomas Huth 
388*da668aa1SThomas Huth     bdrv_unref(backing);
389*da668aa1SThomas Huth     bdrv_unref(bs);
390*da668aa1SThomas Huth     blk_unref(blk);
391*da668aa1SThomas Huth }
392*da668aa1SThomas Huth 
393*da668aa1SThomas Huth static void test_multiparent(void)
394*da668aa1SThomas Huth {
395*da668aa1SThomas Huth     BlockBackend *blk_a, *blk_b;
396*da668aa1SThomas Huth     BlockDriverState *bs_a, *bs_b, *backing;
397*da668aa1SThomas Huth     BDRVTestState *a_s, *b_s, *backing_s;
398*da668aa1SThomas Huth 
399*da668aa1SThomas Huth     blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
400*da668aa1SThomas Huth     bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
401*da668aa1SThomas Huth                                 &error_abort);
402*da668aa1SThomas Huth     a_s = bs_a->opaque;
403*da668aa1SThomas Huth     blk_insert_bs(blk_a, bs_a, &error_abort);
404*da668aa1SThomas Huth 
405*da668aa1SThomas Huth     blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
406*da668aa1SThomas Huth     bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
407*da668aa1SThomas Huth                                 &error_abort);
408*da668aa1SThomas Huth     b_s = bs_b->opaque;
409*da668aa1SThomas Huth     blk_insert_bs(blk_b, bs_b, &error_abort);
410*da668aa1SThomas Huth 
411*da668aa1SThomas Huth     backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
412*da668aa1SThomas Huth     backing_s = backing->opaque;
413*da668aa1SThomas Huth     bdrv_set_backing_hd(bs_a, backing, &error_abort);
414*da668aa1SThomas Huth     bdrv_set_backing_hd(bs_b, backing, &error_abort);
415*da668aa1SThomas Huth 
416*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
417*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
418*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 0);
419*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 0);
420*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 0);
421*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
422*da668aa1SThomas Huth 
423*da668aa1SThomas Huth     do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
424*da668aa1SThomas Huth 
425*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
426*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
427*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 1);
428*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 1);
429*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 1);
430*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 1);
431*da668aa1SThomas Huth 
432*da668aa1SThomas Huth     do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
433*da668aa1SThomas Huth 
434*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
435*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
436*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 2);
437*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 2);
438*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 2);
439*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 2);
440*da668aa1SThomas Huth 
441*da668aa1SThomas Huth     do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
442*da668aa1SThomas Huth 
443*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
444*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
445*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 1);
446*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 1);
447*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 1);
448*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 1);
449*da668aa1SThomas Huth 
450*da668aa1SThomas Huth     do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
451*da668aa1SThomas Huth 
452*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
453*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
454*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 0);
455*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 0);
456*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 0);
457*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
458*da668aa1SThomas Huth 
459*da668aa1SThomas Huth     bdrv_unref(backing);
460*da668aa1SThomas Huth     bdrv_unref(bs_a);
461*da668aa1SThomas Huth     bdrv_unref(bs_b);
462*da668aa1SThomas Huth     blk_unref(blk_a);
463*da668aa1SThomas Huth     blk_unref(blk_b);
464*da668aa1SThomas Huth }
465*da668aa1SThomas Huth 
466*da668aa1SThomas Huth static void test_graph_change_drain_subtree(void)
467*da668aa1SThomas Huth {
468*da668aa1SThomas Huth     BlockBackend *blk_a, *blk_b;
469*da668aa1SThomas Huth     BlockDriverState *bs_a, *bs_b, *backing;
470*da668aa1SThomas Huth     BDRVTestState *a_s, *b_s, *backing_s;
471*da668aa1SThomas Huth 
472*da668aa1SThomas Huth     blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
473*da668aa1SThomas Huth     bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
474*da668aa1SThomas Huth                                 &error_abort);
475*da668aa1SThomas Huth     a_s = bs_a->opaque;
476*da668aa1SThomas Huth     blk_insert_bs(blk_a, bs_a, &error_abort);
477*da668aa1SThomas Huth 
478*da668aa1SThomas Huth     blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
479*da668aa1SThomas Huth     bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
480*da668aa1SThomas Huth                                 &error_abort);
481*da668aa1SThomas Huth     b_s = bs_b->opaque;
482*da668aa1SThomas Huth     blk_insert_bs(blk_b, bs_b, &error_abort);
483*da668aa1SThomas Huth 
484*da668aa1SThomas Huth     backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
485*da668aa1SThomas Huth     backing_s = backing->opaque;
486*da668aa1SThomas Huth     bdrv_set_backing_hd(bs_a, backing, &error_abort);
487*da668aa1SThomas Huth 
488*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
489*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
490*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 0);
491*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 0);
492*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 0);
493*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
494*da668aa1SThomas Huth 
495*da668aa1SThomas Huth     do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
496*da668aa1SThomas Huth     do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
497*da668aa1SThomas Huth     do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
498*da668aa1SThomas Huth     do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
499*da668aa1SThomas Huth     do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
500*da668aa1SThomas Huth 
501*da668aa1SThomas Huth     bdrv_set_backing_hd(bs_b, backing, &error_abort);
502*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
503*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
504*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 5);
505*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 5);
506*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 5);
507*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 5);
508*da668aa1SThomas Huth 
509*da668aa1SThomas Huth     bdrv_set_backing_hd(bs_b, NULL, &error_abort);
510*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
511*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
512*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 3);
513*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 3);
514*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 2);
515*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 3);
516*da668aa1SThomas Huth 
517*da668aa1SThomas Huth     bdrv_set_backing_hd(bs_b, backing, &error_abort);
518*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
519*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
520*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 5);
521*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 5);
522*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 5);
523*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 5);
524*da668aa1SThomas Huth 
525*da668aa1SThomas Huth     do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
526*da668aa1SThomas Huth     do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
527*da668aa1SThomas Huth     do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
528*da668aa1SThomas Huth     do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
529*da668aa1SThomas Huth     do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
530*da668aa1SThomas Huth 
531*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
532*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
533*da668aa1SThomas Huth     g_assert_cmpint(backing->quiesce_counter, ==, 0);
534*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 0);
535*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 0);
536*da668aa1SThomas Huth     g_assert_cmpint(backing_s->drain_count, ==, 0);
537*da668aa1SThomas Huth 
538*da668aa1SThomas Huth     bdrv_unref(backing);
539*da668aa1SThomas Huth     bdrv_unref(bs_a);
540*da668aa1SThomas Huth     bdrv_unref(bs_b);
541*da668aa1SThomas Huth     blk_unref(blk_a);
542*da668aa1SThomas Huth     blk_unref(blk_b);
543*da668aa1SThomas Huth }
544*da668aa1SThomas Huth 
545*da668aa1SThomas Huth static void test_graph_change_drain_all(void)
546*da668aa1SThomas Huth {
547*da668aa1SThomas Huth     BlockBackend *blk_a, *blk_b;
548*da668aa1SThomas Huth     BlockDriverState *bs_a, *bs_b;
549*da668aa1SThomas Huth     BDRVTestState *a_s, *b_s;
550*da668aa1SThomas Huth 
551*da668aa1SThomas Huth     /* Create node A with a BlockBackend */
552*da668aa1SThomas Huth     blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
553*da668aa1SThomas Huth     bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
554*da668aa1SThomas Huth                                 &error_abort);
555*da668aa1SThomas Huth     a_s = bs_a->opaque;
556*da668aa1SThomas Huth     blk_insert_bs(blk_a, bs_a, &error_abort);
557*da668aa1SThomas Huth 
558*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
559*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 0);
560*da668aa1SThomas Huth 
561*da668aa1SThomas Huth     /* Call bdrv_drain_all_begin() */
562*da668aa1SThomas Huth     bdrv_drain_all_begin();
563*da668aa1SThomas Huth 
564*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
565*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 1);
566*da668aa1SThomas Huth 
567*da668aa1SThomas Huth     /* Create node B with a BlockBackend */
568*da668aa1SThomas Huth     blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
569*da668aa1SThomas Huth     bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
570*da668aa1SThomas Huth                                 &error_abort);
571*da668aa1SThomas Huth     b_s = bs_b->opaque;
572*da668aa1SThomas Huth     blk_insert_bs(blk_b, bs_b, &error_abort);
573*da668aa1SThomas Huth 
574*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
575*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
576*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 1);
577*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 1);
578*da668aa1SThomas Huth 
579*da668aa1SThomas Huth     /* Unref and finally delete node A */
580*da668aa1SThomas Huth     blk_unref(blk_a);
581*da668aa1SThomas Huth 
582*da668aa1SThomas Huth     g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
583*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
584*da668aa1SThomas Huth     g_assert_cmpint(a_s->drain_count, ==, 1);
585*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 1);
586*da668aa1SThomas Huth 
587*da668aa1SThomas Huth     bdrv_unref(bs_a);
588*da668aa1SThomas Huth 
589*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
590*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 1);
591*da668aa1SThomas Huth 
592*da668aa1SThomas Huth     /* End the drained section */
593*da668aa1SThomas Huth     bdrv_drain_all_end();
594*da668aa1SThomas Huth 
595*da668aa1SThomas Huth     g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
596*da668aa1SThomas Huth     g_assert_cmpint(b_s->drain_count, ==, 0);
597*da668aa1SThomas Huth     g_assert_cmpint(qemu_get_aio_context()->external_disable_cnt, ==, 0);
598*da668aa1SThomas Huth 
599*da668aa1SThomas Huth     bdrv_unref(bs_b);
600*da668aa1SThomas Huth     blk_unref(blk_b);
601*da668aa1SThomas Huth }
602*da668aa1SThomas Huth 
603*da668aa1SThomas Huth struct test_iothread_data {
604*da668aa1SThomas Huth     BlockDriverState *bs;
605*da668aa1SThomas Huth     enum drain_type drain_type;
606*da668aa1SThomas Huth     int *aio_ret;
607*da668aa1SThomas Huth };
608*da668aa1SThomas Huth 
609*da668aa1SThomas Huth static void test_iothread_drain_entry(void *opaque)
610*da668aa1SThomas Huth {
611*da668aa1SThomas Huth     struct test_iothread_data *data = opaque;
612*da668aa1SThomas Huth 
613*da668aa1SThomas Huth     aio_context_acquire(bdrv_get_aio_context(data->bs));
614*da668aa1SThomas Huth     do_drain_begin(data->drain_type, data->bs);
615*da668aa1SThomas Huth     g_assert_cmpint(*data->aio_ret, ==, 0);
616*da668aa1SThomas Huth     do_drain_end(data->drain_type, data->bs);
617*da668aa1SThomas Huth     aio_context_release(bdrv_get_aio_context(data->bs));
618*da668aa1SThomas Huth 
619*da668aa1SThomas Huth     qemu_event_set(&done_event);
620*da668aa1SThomas Huth }
621*da668aa1SThomas Huth 
622*da668aa1SThomas Huth static void test_iothread_aio_cb(void *opaque, int ret)
623*da668aa1SThomas Huth {
624*da668aa1SThomas Huth     int *aio_ret = opaque;
625*da668aa1SThomas Huth     *aio_ret = ret;
626*da668aa1SThomas Huth     qemu_event_set(&done_event);
627*da668aa1SThomas Huth }
628*da668aa1SThomas Huth 
629*da668aa1SThomas Huth static void test_iothread_main_thread_bh(void *opaque)
630*da668aa1SThomas Huth {
631*da668aa1SThomas Huth     struct test_iothread_data *data = opaque;
632*da668aa1SThomas Huth 
633*da668aa1SThomas Huth     /* Test that the AioContext is not yet locked in a random BH that is
634*da668aa1SThomas Huth      * executed during drain, otherwise this would deadlock. */
635*da668aa1SThomas Huth     aio_context_acquire(bdrv_get_aio_context(data->bs));
636*da668aa1SThomas Huth     bdrv_flush(data->bs);
637*da668aa1SThomas Huth     aio_context_release(bdrv_get_aio_context(data->bs));
638*da668aa1SThomas Huth }
639*da668aa1SThomas Huth 
640*da668aa1SThomas Huth /*
641*da668aa1SThomas Huth  * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
642*da668aa1SThomas Huth  * The request involves a BH on iothread 2 before it can complete.
643*da668aa1SThomas Huth  *
644*da668aa1SThomas Huth  * @drain_thread = 0 means that do_drain_begin/end are called from the main
645*da668aa1SThomas Huth  * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
646*da668aa1SThomas Huth  * for this BDS cannot be called from iothread 2 because only the main thread
647*da668aa1SThomas Huth  * may do cross-AioContext polling.
648*da668aa1SThomas Huth  */
649*da668aa1SThomas Huth static void test_iothread_common(enum drain_type drain_type, int drain_thread)
650*da668aa1SThomas Huth {
651*da668aa1SThomas Huth     BlockBackend *blk;
652*da668aa1SThomas Huth     BlockDriverState *bs;
653*da668aa1SThomas Huth     BDRVTestState *s;
654*da668aa1SThomas Huth     BlockAIOCB *acb;
655*da668aa1SThomas Huth     int aio_ret;
656*da668aa1SThomas Huth     struct test_iothread_data data;
657*da668aa1SThomas Huth 
658*da668aa1SThomas Huth     IOThread *a = iothread_new();
659*da668aa1SThomas Huth     IOThread *b = iothread_new();
660*da668aa1SThomas Huth     AioContext *ctx_a = iothread_get_aio_context(a);
661*da668aa1SThomas Huth     AioContext *ctx_b = iothread_get_aio_context(b);
662*da668aa1SThomas Huth 
663*da668aa1SThomas Huth     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
664*da668aa1SThomas Huth 
665*da668aa1SThomas Huth     /* bdrv_drain_all() may only be called from the main loop thread */
666*da668aa1SThomas Huth     if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
667*da668aa1SThomas Huth         goto out;
668*da668aa1SThomas Huth     }
669*da668aa1SThomas Huth 
670*da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
671*da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
672*da668aa1SThomas Huth                               &error_abort);
673*da668aa1SThomas Huth     s = bs->opaque;
674*da668aa1SThomas Huth     blk_insert_bs(blk, bs, &error_abort);
675*da668aa1SThomas Huth     blk_set_disable_request_queuing(blk, true);
676*da668aa1SThomas Huth 
677*da668aa1SThomas Huth     blk_set_aio_context(blk, ctx_a, &error_abort);
678*da668aa1SThomas Huth     aio_context_acquire(ctx_a);
679*da668aa1SThomas Huth 
680*da668aa1SThomas Huth     s->bh_indirection_ctx = ctx_b;
681*da668aa1SThomas Huth 
682*da668aa1SThomas Huth     aio_ret = -EINPROGRESS;
683*da668aa1SThomas Huth     qemu_event_reset(&done_event);
684*da668aa1SThomas Huth 
685*da668aa1SThomas Huth     if (drain_thread == 0) {
686*da668aa1SThomas Huth         acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
687*da668aa1SThomas Huth     } else {
688*da668aa1SThomas Huth         acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
689*da668aa1SThomas Huth     }
690*da668aa1SThomas Huth     g_assert(acb != NULL);
691*da668aa1SThomas Huth     g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
692*da668aa1SThomas Huth 
693*da668aa1SThomas Huth     aio_context_release(ctx_a);
694*da668aa1SThomas Huth 
695*da668aa1SThomas Huth     data = (struct test_iothread_data) {
696*da668aa1SThomas Huth         .bs         = bs,
697*da668aa1SThomas Huth         .drain_type = drain_type,
698*da668aa1SThomas Huth         .aio_ret    = &aio_ret,
699*da668aa1SThomas Huth     };
700*da668aa1SThomas Huth 
701*da668aa1SThomas Huth     switch (drain_thread) {
702*da668aa1SThomas Huth     case 0:
703*da668aa1SThomas Huth         if (drain_type != BDRV_DRAIN_ALL) {
704*da668aa1SThomas Huth             aio_context_acquire(ctx_a);
705*da668aa1SThomas Huth         }
706*da668aa1SThomas Huth 
707*da668aa1SThomas Huth         aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);
708*da668aa1SThomas Huth 
709*da668aa1SThomas Huth         /* The request is running on the IOThread a. Draining its block device
710*da668aa1SThomas Huth          * will make sure that it has completed as far as the BDS is concerned,
711*da668aa1SThomas Huth          * but the drain in this thread can continue immediately after
712*da668aa1SThomas Huth          * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
713*da668aa1SThomas Huth          * later. */
714*da668aa1SThomas Huth         do_drain_begin(drain_type, bs);
715*da668aa1SThomas Huth         g_assert_cmpint(bs->in_flight, ==, 0);
716*da668aa1SThomas Huth 
717*da668aa1SThomas Huth         if (drain_type != BDRV_DRAIN_ALL) {
718*da668aa1SThomas Huth             aio_context_release(ctx_a);
719*da668aa1SThomas Huth         }
720*da668aa1SThomas Huth         qemu_event_wait(&done_event);
721*da668aa1SThomas Huth         if (drain_type != BDRV_DRAIN_ALL) {
722*da668aa1SThomas Huth             aio_context_acquire(ctx_a);
723*da668aa1SThomas Huth         }
724*da668aa1SThomas Huth 
725*da668aa1SThomas Huth         g_assert_cmpint(aio_ret, ==, 0);
726*da668aa1SThomas Huth         do_drain_end(drain_type, bs);
727*da668aa1SThomas Huth 
728*da668aa1SThomas Huth         if (drain_type != BDRV_DRAIN_ALL) {
729*da668aa1SThomas Huth             aio_context_release(ctx_a);
730*da668aa1SThomas Huth         }
731*da668aa1SThomas Huth         break;
732*da668aa1SThomas Huth     case 1:
733*da668aa1SThomas Huth         aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
734*da668aa1SThomas Huth         qemu_event_wait(&done_event);
735*da668aa1SThomas Huth         break;
736*da668aa1SThomas Huth     default:
737*da668aa1SThomas Huth         g_assert_not_reached();
738*da668aa1SThomas Huth     }
739*da668aa1SThomas Huth 
740*da668aa1SThomas Huth     aio_context_acquire(ctx_a);
741*da668aa1SThomas Huth     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
742*da668aa1SThomas Huth     aio_context_release(ctx_a);
743*da668aa1SThomas Huth 
744*da668aa1SThomas Huth     bdrv_unref(bs);
745*da668aa1SThomas Huth     blk_unref(blk);
746*da668aa1SThomas Huth 
747*da668aa1SThomas Huth out:
748*da668aa1SThomas Huth     iothread_join(a);
749*da668aa1SThomas Huth     iothread_join(b);
750*da668aa1SThomas Huth }
751*da668aa1SThomas Huth 
752*da668aa1SThomas Huth static void test_iothread_drain_all(void)
753*da668aa1SThomas Huth {
754*da668aa1SThomas Huth     test_iothread_common(BDRV_DRAIN_ALL, 0);
755*da668aa1SThomas Huth     test_iothread_common(BDRV_DRAIN_ALL, 1);
756*da668aa1SThomas Huth }
757*da668aa1SThomas Huth 
758*da668aa1SThomas Huth static void test_iothread_drain(void)
759*da668aa1SThomas Huth {
760*da668aa1SThomas Huth     test_iothread_common(BDRV_DRAIN, 0);
761*da668aa1SThomas Huth     test_iothread_common(BDRV_DRAIN, 1);
762*da668aa1SThomas Huth }
763*da668aa1SThomas Huth 
764*da668aa1SThomas Huth static void test_iothread_drain_subtree(void)
765*da668aa1SThomas Huth {
766*da668aa1SThomas Huth     test_iothread_common(BDRV_SUBTREE_DRAIN, 0);
767*da668aa1SThomas Huth     test_iothread_common(BDRV_SUBTREE_DRAIN, 1);
768*da668aa1SThomas Huth }
769*da668aa1SThomas Huth 
770*da668aa1SThomas Huth 
771*da668aa1SThomas Huth typedef struct TestBlockJob {
772*da668aa1SThomas Huth     BlockJob common;
773*da668aa1SThomas Huth     int run_ret;
774*da668aa1SThomas Huth     int prepare_ret;
775*da668aa1SThomas Huth     bool running;
776*da668aa1SThomas Huth     bool should_complete;
777*da668aa1SThomas Huth } TestBlockJob;
778*da668aa1SThomas Huth 
779*da668aa1SThomas Huth static int test_job_prepare(Job *job)
780*da668aa1SThomas Huth {
781*da668aa1SThomas Huth     TestBlockJob *s = container_of(job, TestBlockJob, common.job);
782*da668aa1SThomas Huth 
783*da668aa1SThomas Huth     /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
784*da668aa1SThomas Huth     blk_flush(s->common.blk);
785*da668aa1SThomas Huth     return s->prepare_ret;
786*da668aa1SThomas Huth }
787*da668aa1SThomas Huth 
788*da668aa1SThomas Huth static void test_job_commit(Job *job)
789*da668aa1SThomas Huth {
790*da668aa1SThomas Huth     TestBlockJob *s = container_of(job, TestBlockJob, common.job);
791*da668aa1SThomas Huth 
792*da668aa1SThomas Huth     /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
793*da668aa1SThomas Huth     blk_flush(s->common.blk);
794*da668aa1SThomas Huth }
795*da668aa1SThomas Huth 
796*da668aa1SThomas Huth static void test_job_abort(Job *job)
797*da668aa1SThomas Huth {
798*da668aa1SThomas Huth     TestBlockJob *s = container_of(job, TestBlockJob, common.job);
799*da668aa1SThomas Huth 
800*da668aa1SThomas Huth     /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
801*da668aa1SThomas Huth     blk_flush(s->common.blk);
802*da668aa1SThomas Huth }
803*da668aa1SThomas Huth 
804*da668aa1SThomas Huth static int coroutine_fn test_job_run(Job *job, Error **errp)
805*da668aa1SThomas Huth {
806*da668aa1SThomas Huth     TestBlockJob *s = container_of(job, TestBlockJob, common.job);
807*da668aa1SThomas Huth 
808*da668aa1SThomas Huth     /* We are running the actual job code past the pause point in
809*da668aa1SThomas Huth      * job_co_entry(). */
810*da668aa1SThomas Huth     s->running = true;
811*da668aa1SThomas Huth 
812*da668aa1SThomas Huth     job_transition_to_ready(&s->common.job);
813*da668aa1SThomas Huth     while (!s->should_complete) {
814*da668aa1SThomas Huth         /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
815*da668aa1SThomas Huth          * emulate some actual activity (probably some I/O) here so that drain
816*da668aa1SThomas Huth          * has to wait for this activity to stop. */
817*da668aa1SThomas Huth         qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
818*da668aa1SThomas Huth 
819*da668aa1SThomas Huth         job_pause_point(&s->common.job);
820*da668aa1SThomas Huth     }
821*da668aa1SThomas Huth 
822*da668aa1SThomas Huth     return s->run_ret;
823*da668aa1SThomas Huth }
824*da668aa1SThomas Huth 
825*da668aa1SThomas Huth static void test_job_complete(Job *job, Error **errp)
826*da668aa1SThomas Huth {
827*da668aa1SThomas Huth     TestBlockJob *s = container_of(job, TestBlockJob, common.job);
828*da668aa1SThomas Huth     s->should_complete = true;
829*da668aa1SThomas Huth }
830*da668aa1SThomas Huth 
831*da668aa1SThomas Huth BlockJobDriver test_job_driver = {
832*da668aa1SThomas Huth     .job_driver = {
833*da668aa1SThomas Huth         .instance_size  = sizeof(TestBlockJob),
834*da668aa1SThomas Huth         .free           = block_job_free,
835*da668aa1SThomas Huth         .user_resume    = block_job_user_resume,
836*da668aa1SThomas Huth         .run            = test_job_run,
837*da668aa1SThomas Huth         .complete       = test_job_complete,
838*da668aa1SThomas Huth         .prepare        = test_job_prepare,
839*da668aa1SThomas Huth         .commit         = test_job_commit,
840*da668aa1SThomas Huth         .abort          = test_job_abort,
841*da668aa1SThomas Huth     },
842*da668aa1SThomas Huth };
843*da668aa1SThomas Huth 
844*da668aa1SThomas Huth enum test_job_result {
845*da668aa1SThomas Huth     TEST_JOB_SUCCESS,
846*da668aa1SThomas Huth     TEST_JOB_FAIL_RUN,
847*da668aa1SThomas Huth     TEST_JOB_FAIL_PREPARE,
848*da668aa1SThomas Huth };
849*da668aa1SThomas Huth 
850*da668aa1SThomas Huth enum test_job_drain_node {
851*da668aa1SThomas Huth     TEST_JOB_DRAIN_SRC,
852*da668aa1SThomas Huth     TEST_JOB_DRAIN_SRC_CHILD,
853*da668aa1SThomas Huth     TEST_JOB_DRAIN_SRC_PARENT,
854*da668aa1SThomas Huth };
855*da668aa1SThomas Huth 
856*da668aa1SThomas Huth static void test_blockjob_common_drain_node(enum drain_type drain_type,
857*da668aa1SThomas Huth                                             bool use_iothread,
858*da668aa1SThomas Huth                                             enum test_job_result result,
859*da668aa1SThomas Huth                                             enum test_job_drain_node drain_node)
860*da668aa1SThomas Huth {
861*da668aa1SThomas Huth     BlockBackend *blk_src, *blk_target;
862*da668aa1SThomas Huth     BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
863*da668aa1SThomas Huth     BlockJob *job;
864*da668aa1SThomas Huth     TestBlockJob *tjob;
865*da668aa1SThomas Huth     IOThread *iothread = NULL;
866*da668aa1SThomas Huth     AioContext *ctx;
867*da668aa1SThomas Huth     int ret;
868*da668aa1SThomas Huth 
869*da668aa1SThomas Huth     src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
870*da668aa1SThomas Huth                                &error_abort);
871*da668aa1SThomas Huth     src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
872*da668aa1SThomas Huth                                        BDRV_O_RDWR, &error_abort);
873*da668aa1SThomas Huth     src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
874*da668aa1SThomas Huth                                        BDRV_O_RDWR, &error_abort);
875*da668aa1SThomas Huth 
876*da668aa1SThomas Huth     bdrv_set_backing_hd(src_overlay, src, &error_abort);
877*da668aa1SThomas Huth     bdrv_unref(src);
878*da668aa1SThomas Huth     bdrv_set_backing_hd(src, src_backing, &error_abort);
879*da668aa1SThomas Huth     bdrv_unref(src_backing);
880*da668aa1SThomas Huth 
881*da668aa1SThomas Huth     blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
882*da668aa1SThomas Huth     blk_insert_bs(blk_src, src_overlay, &error_abort);
883*da668aa1SThomas Huth 
884*da668aa1SThomas Huth     switch (drain_node) {
885*da668aa1SThomas Huth     case TEST_JOB_DRAIN_SRC:
886*da668aa1SThomas Huth         drain_bs = src;
887*da668aa1SThomas Huth         break;
888*da668aa1SThomas Huth     case TEST_JOB_DRAIN_SRC_CHILD:
889*da668aa1SThomas Huth         drain_bs = src_backing;
890*da668aa1SThomas Huth         break;
891*da668aa1SThomas Huth     case TEST_JOB_DRAIN_SRC_PARENT:
892*da668aa1SThomas Huth         drain_bs = src_overlay;
893*da668aa1SThomas Huth         break;
894*da668aa1SThomas Huth     default:
895*da668aa1SThomas Huth         g_assert_not_reached();
896*da668aa1SThomas Huth     }
897*da668aa1SThomas Huth 
898*da668aa1SThomas Huth     if (use_iothread) {
899*da668aa1SThomas Huth         iothread = iothread_new();
900*da668aa1SThomas Huth         ctx = iothread_get_aio_context(iothread);
901*da668aa1SThomas Huth         blk_set_aio_context(blk_src, ctx, &error_abort);
902*da668aa1SThomas Huth     } else {
903*da668aa1SThomas Huth         ctx = qemu_get_aio_context();
904*da668aa1SThomas Huth     }
905*da668aa1SThomas Huth 
906*da668aa1SThomas Huth     target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
907*da668aa1SThomas Huth                                   &error_abort);
908*da668aa1SThomas Huth     blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
909*da668aa1SThomas Huth     blk_insert_bs(blk_target, target, &error_abort);
910*da668aa1SThomas Huth     blk_set_allow_aio_context_change(blk_target, true);
911*da668aa1SThomas Huth 
912*da668aa1SThomas Huth     aio_context_acquire(ctx);
913*da668aa1SThomas Huth     tjob = block_job_create("job0", &test_job_driver, NULL, src,
914*da668aa1SThomas Huth                             0, BLK_PERM_ALL,
915*da668aa1SThomas Huth                             0, 0, NULL, NULL, &error_abort);
916*da668aa1SThomas Huth     job = &tjob->common;
917*da668aa1SThomas Huth     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
918*da668aa1SThomas Huth 
919*da668aa1SThomas Huth     switch (result) {
920*da668aa1SThomas Huth     case TEST_JOB_SUCCESS:
921*da668aa1SThomas Huth         break;
922*da668aa1SThomas Huth     case TEST_JOB_FAIL_RUN:
923*da668aa1SThomas Huth         tjob->run_ret = -EIO;
924*da668aa1SThomas Huth         break;
925*da668aa1SThomas Huth     case TEST_JOB_FAIL_PREPARE:
926*da668aa1SThomas Huth         tjob->prepare_ret = -EIO;
927*da668aa1SThomas Huth         break;
928*da668aa1SThomas Huth     }
929*da668aa1SThomas Huth 
930*da668aa1SThomas Huth     job_start(&job->job);
931*da668aa1SThomas Huth     aio_context_release(ctx);
932*da668aa1SThomas Huth 
933*da668aa1SThomas Huth     if (use_iothread) {
934*da668aa1SThomas Huth         /* job_co_entry() is run in the I/O thread, wait for the actual job
935*da668aa1SThomas Huth          * code to start (we don't want to catch the job in the pause point in
936*da668aa1SThomas Huth          * job_co_entry(). */
937*da668aa1SThomas Huth         while (!tjob->running) {
938*da668aa1SThomas Huth             aio_poll(qemu_get_aio_context(), false);
939*da668aa1SThomas Huth         }
940*da668aa1SThomas Huth     }
941*da668aa1SThomas Huth 
942*da668aa1SThomas Huth     g_assert_cmpint(job->job.pause_count, ==, 0);
943*da668aa1SThomas Huth     g_assert_false(job->job.paused);
944*da668aa1SThomas Huth     g_assert_true(tjob->running);
945*da668aa1SThomas Huth     g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
946*da668aa1SThomas Huth 
947*da668aa1SThomas Huth     do_drain_begin_unlocked(drain_type, drain_bs);
948*da668aa1SThomas Huth 
949*da668aa1SThomas Huth     if (drain_type == BDRV_DRAIN_ALL) {
950*da668aa1SThomas Huth         /* bdrv_drain_all() drains both src and target */
951*da668aa1SThomas Huth         g_assert_cmpint(job->job.pause_count, ==, 2);
952*da668aa1SThomas Huth     } else {
953*da668aa1SThomas Huth         g_assert_cmpint(job->job.pause_count, ==, 1);
954*da668aa1SThomas Huth     }
955*da668aa1SThomas Huth     g_assert_true(job->job.paused);
956*da668aa1SThomas Huth     g_assert_false(job->job.busy); /* The job is paused */
957*da668aa1SThomas Huth 
958*da668aa1SThomas Huth     do_drain_end_unlocked(drain_type, drain_bs);
959*da668aa1SThomas Huth 
960*da668aa1SThomas Huth     if (use_iothread) {
961*da668aa1SThomas Huth         /* paused is reset in the I/O thread, wait for it */
962*da668aa1SThomas Huth         while (job->job.paused) {
963*da668aa1SThomas Huth             aio_poll(qemu_get_aio_context(), false);
964*da668aa1SThomas Huth         }
965*da668aa1SThomas Huth     }
966*da668aa1SThomas Huth 
967*da668aa1SThomas Huth     g_assert_cmpint(job->job.pause_count, ==, 0);
968*da668aa1SThomas Huth     g_assert_false(job->job.paused);
969*da668aa1SThomas Huth     g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
970*da668aa1SThomas Huth 
971*da668aa1SThomas Huth     do_drain_begin_unlocked(drain_type, target);
972*da668aa1SThomas Huth 
973*da668aa1SThomas Huth     if (drain_type == BDRV_DRAIN_ALL) {
974*da668aa1SThomas Huth         /* bdrv_drain_all() drains both src and target */
975*da668aa1SThomas Huth         g_assert_cmpint(job->job.pause_count, ==, 2);
976*da668aa1SThomas Huth     } else {
977*da668aa1SThomas Huth         g_assert_cmpint(job->job.pause_count, ==, 1);
978*da668aa1SThomas Huth     }
979*da668aa1SThomas Huth     g_assert_true(job->job.paused);
980*da668aa1SThomas Huth     g_assert_false(job->job.busy); /* The job is paused */
981*da668aa1SThomas Huth 
982*da668aa1SThomas Huth     do_drain_end_unlocked(drain_type, target);
983*da668aa1SThomas Huth 
984*da668aa1SThomas Huth     if (use_iothread) {
985*da668aa1SThomas Huth         /* paused is reset in the I/O thread, wait for it */
986*da668aa1SThomas Huth         while (job->job.paused) {
987*da668aa1SThomas Huth             aio_poll(qemu_get_aio_context(), false);
988*da668aa1SThomas Huth         }
989*da668aa1SThomas Huth     }
990*da668aa1SThomas Huth 
991*da668aa1SThomas Huth     g_assert_cmpint(job->job.pause_count, ==, 0);
992*da668aa1SThomas Huth     g_assert_false(job->job.paused);
993*da668aa1SThomas Huth     g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
994*da668aa1SThomas Huth 
995*da668aa1SThomas Huth     aio_context_acquire(ctx);
996*da668aa1SThomas Huth     ret = job_complete_sync(&job->job, &error_abort);
997*da668aa1SThomas Huth     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
998*da668aa1SThomas Huth 
999*da668aa1SThomas Huth     if (use_iothread) {
1000*da668aa1SThomas Huth         blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
1001*da668aa1SThomas Huth         assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
1002*da668aa1SThomas Huth     }
1003*da668aa1SThomas Huth     aio_context_release(ctx);
1004*da668aa1SThomas Huth 
1005*da668aa1SThomas Huth     blk_unref(blk_src);
1006*da668aa1SThomas Huth     blk_unref(blk_target);
1007*da668aa1SThomas Huth     bdrv_unref(src_overlay);
1008*da668aa1SThomas Huth     bdrv_unref(target);
1009*da668aa1SThomas Huth 
1010*da668aa1SThomas Huth     if (iothread) {
1011*da668aa1SThomas Huth         iothread_join(iothread);
1012*da668aa1SThomas Huth     }
1013*da668aa1SThomas Huth }
1014*da668aa1SThomas Huth 
1015*da668aa1SThomas Huth static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
1016*da668aa1SThomas Huth                                  enum test_job_result result)
1017*da668aa1SThomas Huth {
1018*da668aa1SThomas Huth     test_blockjob_common_drain_node(drain_type, use_iothread, result,
1019*da668aa1SThomas Huth                                     TEST_JOB_DRAIN_SRC);
1020*da668aa1SThomas Huth     test_blockjob_common_drain_node(drain_type, use_iothread, result,
1021*da668aa1SThomas Huth                                     TEST_JOB_DRAIN_SRC_CHILD);
1022*da668aa1SThomas Huth     if (drain_type == BDRV_SUBTREE_DRAIN) {
1023*da668aa1SThomas Huth         test_blockjob_common_drain_node(drain_type, use_iothread, result,
1024*da668aa1SThomas Huth                                         TEST_JOB_DRAIN_SRC_PARENT);
1025*da668aa1SThomas Huth     }
1026*da668aa1SThomas Huth }
1027*da668aa1SThomas Huth 
1028*da668aa1SThomas Huth static void test_blockjob_drain_all(void)
1029*da668aa1SThomas Huth {
1030*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
1031*da668aa1SThomas Huth }
1032*da668aa1SThomas Huth 
1033*da668aa1SThomas Huth static void test_blockjob_drain(void)
1034*da668aa1SThomas Huth {
1035*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
1036*da668aa1SThomas Huth }
1037*da668aa1SThomas Huth 
1038*da668aa1SThomas Huth static void test_blockjob_drain_subtree(void)
1039*da668aa1SThomas Huth {
1040*da668aa1SThomas Huth     test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
1041*da668aa1SThomas Huth }
1042*da668aa1SThomas Huth 
1043*da668aa1SThomas Huth static void test_blockjob_error_drain_all(void)
1044*da668aa1SThomas Huth {
1045*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
1046*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
1047*da668aa1SThomas Huth }
1048*da668aa1SThomas Huth 
1049*da668aa1SThomas Huth static void test_blockjob_error_drain(void)
1050*da668aa1SThomas Huth {
1051*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
1052*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
1053*da668aa1SThomas Huth }
1054*da668aa1SThomas Huth 
1055*da668aa1SThomas Huth static void test_blockjob_error_drain_subtree(void)
1056*da668aa1SThomas Huth {
1057*da668aa1SThomas Huth     test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
1058*da668aa1SThomas Huth     test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
1059*da668aa1SThomas Huth }
1060*da668aa1SThomas Huth 
1061*da668aa1SThomas Huth static void test_blockjob_iothread_drain_all(void)
1062*da668aa1SThomas Huth {
1063*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
1064*da668aa1SThomas Huth }
1065*da668aa1SThomas Huth 
1066*da668aa1SThomas Huth static void test_blockjob_iothread_drain(void)
1067*da668aa1SThomas Huth {
1068*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
1069*da668aa1SThomas Huth }
1070*da668aa1SThomas Huth 
1071*da668aa1SThomas Huth static void test_blockjob_iothread_drain_subtree(void)
1072*da668aa1SThomas Huth {
1073*da668aa1SThomas Huth     test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
1074*da668aa1SThomas Huth }
1075*da668aa1SThomas Huth 
1076*da668aa1SThomas Huth static void test_blockjob_iothread_error_drain_all(void)
1077*da668aa1SThomas Huth {
1078*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
1079*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
1080*da668aa1SThomas Huth }
1081*da668aa1SThomas Huth 
1082*da668aa1SThomas Huth static void test_blockjob_iothread_error_drain(void)
1083*da668aa1SThomas Huth {
1084*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
1085*da668aa1SThomas Huth     test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
1086*da668aa1SThomas Huth }
1087*da668aa1SThomas Huth 
1088*da668aa1SThomas Huth static void test_blockjob_iothread_error_drain_subtree(void)
1089*da668aa1SThomas Huth {
1090*da668aa1SThomas Huth     test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
1091*da668aa1SThomas Huth     test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
1092*da668aa1SThomas Huth }
1093*da668aa1SThomas Huth 
1094*da668aa1SThomas Huth 
1095*da668aa1SThomas Huth typedef struct BDRVTestTopState {
1096*da668aa1SThomas Huth     BdrvChild *wait_child;
1097*da668aa1SThomas Huth } BDRVTestTopState;
1098*da668aa1SThomas Huth 
1099*da668aa1SThomas Huth static void bdrv_test_top_close(BlockDriverState *bs)
1100*da668aa1SThomas Huth {
1101*da668aa1SThomas Huth     BdrvChild *c, *next_c;
1102*da668aa1SThomas Huth     QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
1103*da668aa1SThomas Huth         bdrv_unref_child(bs, c);
1104*da668aa1SThomas Huth     }
1105*da668aa1SThomas Huth }
1106*da668aa1SThomas Huth 
1107*da668aa1SThomas Huth static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
1108*da668aa1SThomas Huth                                                 uint64_t offset, uint64_t bytes,
1109*da668aa1SThomas Huth                                                 QEMUIOVector *qiov, int flags)
1110*da668aa1SThomas Huth {
1111*da668aa1SThomas Huth     BDRVTestTopState *tts = bs->opaque;
1112*da668aa1SThomas Huth     return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
1113*da668aa1SThomas Huth }
1114*da668aa1SThomas Huth 
1115*da668aa1SThomas Huth static BlockDriver bdrv_test_top_driver = {
1116*da668aa1SThomas Huth     .format_name            = "test_top_driver",
1117*da668aa1SThomas Huth     .instance_size          = sizeof(BDRVTestTopState),
1118*da668aa1SThomas Huth 
1119*da668aa1SThomas Huth     .bdrv_close             = bdrv_test_top_close,
1120*da668aa1SThomas Huth     .bdrv_co_preadv         = bdrv_test_top_co_preadv,
1121*da668aa1SThomas Huth 
1122*da668aa1SThomas Huth     .bdrv_child_perm        = bdrv_default_perms,
1123*da668aa1SThomas Huth };
1124*da668aa1SThomas Huth 
1125*da668aa1SThomas Huth typedef struct TestCoDeleteByDrainData {
1126*da668aa1SThomas Huth     BlockBackend *blk;
1127*da668aa1SThomas Huth     bool detach_instead_of_delete;
1128*da668aa1SThomas Huth     bool done;
1129*da668aa1SThomas Huth } TestCoDeleteByDrainData;
1130*da668aa1SThomas Huth 
1131*da668aa1SThomas Huth static void coroutine_fn test_co_delete_by_drain(void *opaque)
1132*da668aa1SThomas Huth {
1133*da668aa1SThomas Huth     TestCoDeleteByDrainData *dbdd = opaque;
1134*da668aa1SThomas Huth     BlockBackend *blk = dbdd->blk;
1135*da668aa1SThomas Huth     BlockDriverState *bs = blk_bs(blk);
1136*da668aa1SThomas Huth     BDRVTestTopState *tts = bs->opaque;
1137*da668aa1SThomas Huth     void *buffer = g_malloc(65536);
1138*da668aa1SThomas Huth     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);
1139*da668aa1SThomas Huth 
1140*da668aa1SThomas Huth     /* Pretend some internal write operation from parent to child.
1141*da668aa1SThomas Huth      * Important: We have to read from the child, not from the parent!
1142*da668aa1SThomas Huth      * Draining works by first propagating it all up the tree to the
1143*da668aa1SThomas Huth      * root and then waiting for drainage from root to the leaves
1144*da668aa1SThomas Huth      * (protocol nodes).  If we have a request waiting on the root,
1145*da668aa1SThomas Huth      * everything will be drained before we go back down the tree, but
1146*da668aa1SThomas Huth      * we do not want that.  We want to be in the middle of draining
1147*da668aa1SThomas Huth      * when this following requests returns. */
1148*da668aa1SThomas Huth     bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
1149*da668aa1SThomas Huth 
1150*da668aa1SThomas Huth     g_assert_cmpint(bs->refcnt, ==, 1);
1151*da668aa1SThomas Huth 
1152*da668aa1SThomas Huth     if (!dbdd->detach_instead_of_delete) {
1153*da668aa1SThomas Huth         blk_unref(blk);
1154*da668aa1SThomas Huth     } else {
1155*da668aa1SThomas Huth         BdrvChild *c, *next_c;
1156*da668aa1SThomas Huth         QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
1157*da668aa1SThomas Huth             bdrv_unref_child(bs, c);
1158*da668aa1SThomas Huth         }
1159*da668aa1SThomas Huth     }
1160*da668aa1SThomas Huth 
1161*da668aa1SThomas Huth     dbdd->done = true;
1162*da668aa1SThomas Huth     g_free(buffer);
1163*da668aa1SThomas Huth }
1164*da668aa1SThomas Huth 
1165*da668aa1SThomas Huth /**
1166*da668aa1SThomas Huth  * Test what happens when some BDS has some children, you drain one of
1167*da668aa1SThomas Huth  * them and this results in the BDS being deleted.
1168*da668aa1SThomas Huth  *
1169*da668aa1SThomas Huth  * If @detach_instead_of_delete is set, the BDS is not going to be
1170*da668aa1SThomas Huth  * deleted but will only detach all of its children.
1171*da668aa1SThomas Huth  */
1172*da668aa1SThomas Huth static void do_test_delete_by_drain(bool detach_instead_of_delete,
1173*da668aa1SThomas Huth                                     enum drain_type drain_type)
1174*da668aa1SThomas Huth {
1175*da668aa1SThomas Huth     BlockBackend *blk;
1176*da668aa1SThomas Huth     BlockDriverState *bs, *child_bs, *null_bs;
1177*da668aa1SThomas Huth     BDRVTestTopState *tts;
1178*da668aa1SThomas Huth     TestCoDeleteByDrainData dbdd;
1179*da668aa1SThomas Huth     Coroutine *co;
1180*da668aa1SThomas Huth 
1181*da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
1182*da668aa1SThomas Huth                               &error_abort);
1183*da668aa1SThomas Huth     bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1184*da668aa1SThomas Huth     tts = bs->opaque;
1185*da668aa1SThomas Huth 
1186*da668aa1SThomas Huth     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1187*da668aa1SThomas Huth                         &error_abort);
1188*da668aa1SThomas Huth     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
1189*da668aa1SThomas Huth                       BDRV_CHILD_DATA, &error_abort);
1190*da668aa1SThomas Huth 
1191*da668aa1SThomas Huth     /* This child will be the one to pass to requests through to, and
1192*da668aa1SThomas Huth      * it will stall until a drain occurs */
1193*da668aa1SThomas Huth     child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
1194*da668aa1SThomas Huth                                     &error_abort);
1195*da668aa1SThomas Huth     child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1196*da668aa1SThomas Huth     /* Takes our reference to child_bs */
1197*da668aa1SThomas Huth     tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
1198*da668aa1SThomas Huth                                         &child_of_bds,
1199*da668aa1SThomas Huth                                         BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
1200*da668aa1SThomas Huth                                         &error_abort);
1201*da668aa1SThomas Huth 
1202*da668aa1SThomas Huth     /* This child is just there to be deleted
1203*da668aa1SThomas Huth      * (for detach_instead_of_delete == true) */
1204*da668aa1SThomas Huth     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1205*da668aa1SThomas Huth                         &error_abort);
1206*da668aa1SThomas Huth     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
1207*da668aa1SThomas Huth                       &error_abort);
1208*da668aa1SThomas Huth 
1209*da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1210*da668aa1SThomas Huth     blk_insert_bs(blk, bs, &error_abort);
1211*da668aa1SThomas Huth 
1212*da668aa1SThomas Huth     /* Referenced by blk now */
1213*da668aa1SThomas Huth     bdrv_unref(bs);
1214*da668aa1SThomas Huth 
1215*da668aa1SThomas Huth     g_assert_cmpint(bs->refcnt, ==, 1);
1216*da668aa1SThomas Huth     g_assert_cmpint(child_bs->refcnt, ==, 1);
1217*da668aa1SThomas Huth     g_assert_cmpint(null_bs->refcnt, ==, 1);
1218*da668aa1SThomas Huth 
1219*da668aa1SThomas Huth 
1220*da668aa1SThomas Huth     dbdd = (TestCoDeleteByDrainData){
1221*da668aa1SThomas Huth         .blk = blk,
1222*da668aa1SThomas Huth         .detach_instead_of_delete = detach_instead_of_delete,
1223*da668aa1SThomas Huth         .done = false,
1224*da668aa1SThomas Huth     };
1225*da668aa1SThomas Huth     co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
1226*da668aa1SThomas Huth     qemu_coroutine_enter(co);
1227*da668aa1SThomas Huth 
1228*da668aa1SThomas Huth     /* Drain the child while the read operation is still pending.
1229*da668aa1SThomas Huth      * This should result in the operation finishing and
1230*da668aa1SThomas Huth      * test_co_delete_by_drain() resuming.  Thus, @bs will be deleted
1231*da668aa1SThomas Huth      * and the coroutine will exit while this drain operation is still
1232*da668aa1SThomas Huth      * in progress. */
1233*da668aa1SThomas Huth     switch (drain_type) {
1234*da668aa1SThomas Huth     case BDRV_DRAIN:
1235*da668aa1SThomas Huth         bdrv_ref(child_bs);
1236*da668aa1SThomas Huth         bdrv_drain(child_bs);
1237*da668aa1SThomas Huth         bdrv_unref(child_bs);
1238*da668aa1SThomas Huth         break;
1239*da668aa1SThomas Huth     case BDRV_SUBTREE_DRAIN:
1240*da668aa1SThomas Huth         /* Would have to ref/unref bs here for !detach_instead_of_delete, but
1241*da668aa1SThomas Huth          * then the whole test becomes pointless because the graph changes
1242*da668aa1SThomas Huth          * don't occur during the drain any more. */
1243*da668aa1SThomas Huth         assert(detach_instead_of_delete);
1244*da668aa1SThomas Huth         bdrv_subtree_drained_begin(bs);
1245*da668aa1SThomas Huth         bdrv_subtree_drained_end(bs);
1246*da668aa1SThomas Huth         break;
1247*da668aa1SThomas Huth     case BDRV_DRAIN_ALL:
1248*da668aa1SThomas Huth         bdrv_drain_all_begin();
1249*da668aa1SThomas Huth         bdrv_drain_all_end();
1250*da668aa1SThomas Huth         break;
1251*da668aa1SThomas Huth     default:
1252*da668aa1SThomas Huth         g_assert_not_reached();
1253*da668aa1SThomas Huth     }
1254*da668aa1SThomas Huth 
1255*da668aa1SThomas Huth     while (!dbdd.done) {
1256*da668aa1SThomas Huth         aio_poll(qemu_get_aio_context(), true);
1257*da668aa1SThomas Huth     }
1258*da668aa1SThomas Huth 
1259*da668aa1SThomas Huth     if (detach_instead_of_delete) {
1260*da668aa1SThomas Huth         /* Here, the reference has not passed over to the coroutine,
1261*da668aa1SThomas Huth          * so we have to delete the BB ourselves */
1262*da668aa1SThomas Huth         blk_unref(blk);
1263*da668aa1SThomas Huth     }
1264*da668aa1SThomas Huth }
1265*da668aa1SThomas Huth 
1266*da668aa1SThomas Huth static void test_delete_by_drain(void)
1267*da668aa1SThomas Huth {
1268*da668aa1SThomas Huth     do_test_delete_by_drain(false, BDRV_DRAIN);
1269*da668aa1SThomas Huth }
1270*da668aa1SThomas Huth 
1271*da668aa1SThomas Huth static void test_detach_by_drain_all(void)
1272*da668aa1SThomas Huth {
1273*da668aa1SThomas Huth     do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
1274*da668aa1SThomas Huth }
1275*da668aa1SThomas Huth 
1276*da668aa1SThomas Huth static void test_detach_by_drain(void)
1277*da668aa1SThomas Huth {
1278*da668aa1SThomas Huth     do_test_delete_by_drain(true, BDRV_DRAIN);
1279*da668aa1SThomas Huth }
1280*da668aa1SThomas Huth 
1281*da668aa1SThomas Huth static void test_detach_by_drain_subtree(void)
1282*da668aa1SThomas Huth {
1283*da668aa1SThomas Huth     do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN);
1284*da668aa1SThomas Huth }
1285*da668aa1SThomas Huth 
1286*da668aa1SThomas Huth 
1287*da668aa1SThomas Huth struct detach_by_parent_data {
1288*da668aa1SThomas Huth     BlockDriverState *parent_b;
1289*da668aa1SThomas Huth     BdrvChild *child_b;
1290*da668aa1SThomas Huth     BlockDriverState *c;
1291*da668aa1SThomas Huth     BdrvChild *child_c;
1292*da668aa1SThomas Huth     bool by_parent_cb;
1293*da668aa1SThomas Huth };
1294*da668aa1SThomas Huth static struct detach_by_parent_data detach_by_parent_data;
1295*da668aa1SThomas Huth 
1296*da668aa1SThomas Huth static void detach_indirect_bh(void *opaque)
1297*da668aa1SThomas Huth {
1298*da668aa1SThomas Huth     struct detach_by_parent_data *data = opaque;
1299*da668aa1SThomas Huth 
1300*da668aa1SThomas Huth     bdrv_unref_child(data->parent_b, data->child_b);
1301*da668aa1SThomas Huth 
1302*da668aa1SThomas Huth     bdrv_ref(data->c);
1303*da668aa1SThomas Huth     data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
1304*da668aa1SThomas Huth                                       &child_of_bds, BDRV_CHILD_DATA,
1305*da668aa1SThomas Huth                                       &error_abort);
1306*da668aa1SThomas Huth }
1307*da668aa1SThomas Huth 
1308*da668aa1SThomas Huth static void detach_by_parent_aio_cb(void *opaque, int ret)
1309*da668aa1SThomas Huth {
1310*da668aa1SThomas Huth     struct detach_by_parent_data *data = &detach_by_parent_data;
1311*da668aa1SThomas Huth 
1312*da668aa1SThomas Huth     g_assert_cmpint(ret, ==, 0);
1313*da668aa1SThomas Huth     if (data->by_parent_cb) {
1314*da668aa1SThomas Huth         detach_indirect_bh(data);
1315*da668aa1SThomas Huth     }
1316*da668aa1SThomas Huth }
1317*da668aa1SThomas Huth 
1318*da668aa1SThomas Huth static void detach_by_driver_cb_drained_begin(BdrvChild *child)
1319*da668aa1SThomas Huth {
1320*da668aa1SThomas Huth     aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
1321*da668aa1SThomas Huth                             detach_indirect_bh, &detach_by_parent_data);
1322*da668aa1SThomas Huth     child_of_bds.drained_begin(child);
1323*da668aa1SThomas Huth }
1324*da668aa1SThomas Huth 
1325*da668aa1SThomas Huth static BdrvChildClass detach_by_driver_cb_class;
1326*da668aa1SThomas Huth 
1327*da668aa1SThomas Huth /*
1328*da668aa1SThomas Huth  * Initial graph:
1329*da668aa1SThomas Huth  *
1330*da668aa1SThomas Huth  * PA     PB
1331*da668aa1SThomas Huth  *    \ /   \
1332*da668aa1SThomas Huth  *     A     B     C
1333*da668aa1SThomas Huth  *
1334*da668aa1SThomas Huth  * by_parent_cb == true:  Test that parent callbacks don't poll
1335*da668aa1SThomas Huth  *
1336*da668aa1SThomas Huth  *     PA has a pending write request whose callback changes the child nodes of
1337*da668aa1SThomas Huth  *     PB: It removes B and adds C instead. The subtree of PB is drained, which
1338*da668aa1SThomas Huth  *     will indirectly drain the write request, too.
1339*da668aa1SThomas Huth  *
1340*da668aa1SThomas Huth  * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll
1341*da668aa1SThomas Huth  *
1342*da668aa1SThomas Huth  *     PA's BdrvChildClass has a .drained_begin callback that schedules a BH
1343*da668aa1SThomas Huth  *     that does the same graph change. If bdrv_drain_invoke() calls it, the
1344*da668aa1SThomas Huth  *     state is messed up, but if it is only polled in the single
1345*da668aa1SThomas Huth  *     BDRV_POLL_WHILE() at the end of the drain, this should work fine.
1346*da668aa1SThomas Huth  */
1347*da668aa1SThomas Huth static void test_detach_indirect(bool by_parent_cb)
1348*da668aa1SThomas Huth {
1349*da668aa1SThomas Huth     BlockBackend *blk;
1350*da668aa1SThomas Huth     BlockDriverState *parent_a, *parent_b, *a, *b, *c;
1351*da668aa1SThomas Huth     BdrvChild *child_a, *child_b;
1352*da668aa1SThomas Huth     BlockAIOCB *acb;
1353*da668aa1SThomas Huth 
1354*da668aa1SThomas Huth     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
1355*da668aa1SThomas Huth 
1356*da668aa1SThomas Huth     if (!by_parent_cb) {
1357*da668aa1SThomas Huth         detach_by_driver_cb_class = child_of_bds;
1358*da668aa1SThomas Huth         detach_by_driver_cb_class.drained_begin =
1359*da668aa1SThomas Huth             detach_by_driver_cb_drained_begin;
1360*da668aa1SThomas Huth     }
1361*da668aa1SThomas Huth 
1362*da668aa1SThomas Huth     /* Create all involved nodes */
1363*da668aa1SThomas Huth     parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
1364*da668aa1SThomas Huth                                     &error_abort);
1365*da668aa1SThomas Huth     parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
1366*da668aa1SThomas Huth                                     &error_abort);
1367*da668aa1SThomas Huth 
1368*da668aa1SThomas Huth     a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
1369*da668aa1SThomas Huth     b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
1370*da668aa1SThomas Huth     c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);
1371*da668aa1SThomas Huth 
1372*da668aa1SThomas Huth     /* blk is a BB for parent-a */
1373*da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1374*da668aa1SThomas Huth     blk_insert_bs(blk, parent_a, &error_abort);
1375*da668aa1SThomas Huth     bdrv_unref(parent_a);
1376*da668aa1SThomas Huth 
1377*da668aa1SThomas Huth     /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver
1378*da668aa1SThomas Huth      * callback must not return immediately. */
1379*da668aa1SThomas Huth     if (!by_parent_cb) {
1380*da668aa1SThomas Huth         BDRVTestState *s = parent_a->opaque;
1381*da668aa1SThomas Huth         s->sleep_in_drain_begin = true;
1382*da668aa1SThomas Huth     }
1383*da668aa1SThomas Huth 
1384*da668aa1SThomas Huth     /* Set child relationships */
1385*da668aa1SThomas Huth     bdrv_ref(b);
1386*da668aa1SThomas Huth     bdrv_ref(a);
1387*da668aa1SThomas Huth     child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
1388*da668aa1SThomas Huth                                 BDRV_CHILD_DATA, &error_abort);
1389*da668aa1SThomas Huth     child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
1390*da668aa1SThomas Huth                                 BDRV_CHILD_COW, &error_abort);
1391*da668aa1SThomas Huth 
1392*da668aa1SThomas Huth     bdrv_ref(a);
1393*da668aa1SThomas Huth     bdrv_attach_child(parent_a, a, "PA-A",
1394*da668aa1SThomas Huth                       by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
1395*da668aa1SThomas Huth                       BDRV_CHILD_DATA, &error_abort);
1396*da668aa1SThomas Huth 
1397*da668aa1SThomas Huth     g_assert_cmpint(parent_a->refcnt, ==, 1);
1398*da668aa1SThomas Huth     g_assert_cmpint(parent_b->refcnt, ==, 1);
1399*da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 3);
1400*da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 2);
1401*da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 1);
1402*da668aa1SThomas Huth 
1403*da668aa1SThomas Huth     g_assert(QLIST_FIRST(&parent_b->children) == child_a);
1404*da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_a, next) == child_b);
1405*da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_b, next) == NULL);
1406*da668aa1SThomas Huth 
1407*da668aa1SThomas Huth     /* Start the evil write request */
1408*da668aa1SThomas Huth     detach_by_parent_data = (struct detach_by_parent_data) {
1409*da668aa1SThomas Huth         .parent_b = parent_b,
1410*da668aa1SThomas Huth         .child_b = child_b,
1411*da668aa1SThomas Huth         .c = c,
1412*da668aa1SThomas Huth         .by_parent_cb = by_parent_cb,
1413*da668aa1SThomas Huth     };
1414*da668aa1SThomas Huth     acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
1415*da668aa1SThomas Huth     g_assert(acb != NULL);
1416*da668aa1SThomas Huth 
1417*da668aa1SThomas Huth     /* Drain and check the expected result */
1418*da668aa1SThomas Huth     bdrv_subtree_drained_begin(parent_b);
1419*da668aa1SThomas Huth 
1420*da668aa1SThomas Huth     g_assert(detach_by_parent_data.child_c != NULL);
1421*da668aa1SThomas Huth 
1422*da668aa1SThomas Huth     g_assert_cmpint(parent_a->refcnt, ==, 1);
1423*da668aa1SThomas Huth     g_assert_cmpint(parent_b->refcnt, ==, 1);
1424*da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 3);
1425*da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 1);
1426*da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 2);
1427*da668aa1SThomas Huth 
1428*da668aa1SThomas Huth     g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
1429*da668aa1SThomas Huth     g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
1430*da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_a, next) == NULL);
1431*da668aa1SThomas Huth 
1432*da668aa1SThomas Huth     g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
1433*da668aa1SThomas Huth     g_assert_cmpint(parent_b->quiesce_counter, ==, 1);
1434*da668aa1SThomas Huth     g_assert_cmpint(a->quiesce_counter, ==, 1);
1435*da668aa1SThomas Huth     g_assert_cmpint(b->quiesce_counter, ==, 0);
1436*da668aa1SThomas Huth     g_assert_cmpint(c->quiesce_counter, ==, 1);
1437*da668aa1SThomas Huth 
1438*da668aa1SThomas Huth     bdrv_subtree_drained_end(parent_b);
1439*da668aa1SThomas Huth 
1440*da668aa1SThomas Huth     bdrv_unref(parent_b);
1441*da668aa1SThomas Huth     blk_unref(blk);
1442*da668aa1SThomas Huth 
1443*da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 1);
1444*da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 1);
1445*da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 1);
1446*da668aa1SThomas Huth     bdrv_unref(a);
1447*da668aa1SThomas Huth     bdrv_unref(b);
1448*da668aa1SThomas Huth     bdrv_unref(c);
1449*da668aa1SThomas Huth }
1450*da668aa1SThomas Huth 
1451*da668aa1SThomas Huth static void test_detach_by_parent_cb(void)
1452*da668aa1SThomas Huth {
1453*da668aa1SThomas Huth     test_detach_indirect(true);
1454*da668aa1SThomas Huth }
1455*da668aa1SThomas Huth 
1456*da668aa1SThomas Huth static void test_detach_by_driver_cb(void)
1457*da668aa1SThomas Huth {
1458*da668aa1SThomas Huth     test_detach_indirect(false);
1459*da668aa1SThomas Huth }
1460*da668aa1SThomas Huth 
1461*da668aa1SThomas Huth static void test_append_to_drained(void)
1462*da668aa1SThomas Huth {
1463*da668aa1SThomas Huth     BlockBackend *blk;
1464*da668aa1SThomas Huth     BlockDriverState *base, *overlay;
1465*da668aa1SThomas Huth     BDRVTestState *base_s, *overlay_s;
1466*da668aa1SThomas Huth 
1467*da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1468*da668aa1SThomas Huth     base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
1469*da668aa1SThomas Huth     base_s = base->opaque;
1470*da668aa1SThomas Huth     blk_insert_bs(blk, base, &error_abort);
1471*da668aa1SThomas Huth 
1472*da668aa1SThomas Huth     overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR,
1473*da668aa1SThomas Huth                                    &error_abort);
1474*da668aa1SThomas Huth     overlay_s = overlay->opaque;
1475*da668aa1SThomas Huth 
1476*da668aa1SThomas Huth     do_drain_begin(BDRV_DRAIN, base);
1477*da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 1);
1478*da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 1);
1479*da668aa1SThomas Huth     g_assert_cmpint(base->in_flight, ==, 0);
1480*da668aa1SThomas Huth 
1481*da668aa1SThomas Huth     /* Takes ownership of overlay, so we don't have to unref it later */
1482*da668aa1SThomas Huth     bdrv_append(overlay, base, &error_abort);
1483*da668aa1SThomas Huth     g_assert_cmpint(base->in_flight, ==, 0);
1484*da668aa1SThomas Huth     g_assert_cmpint(overlay->in_flight, ==, 0);
1485*da668aa1SThomas Huth 
1486*da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 1);
1487*da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 1);
1488*da668aa1SThomas Huth     g_assert_cmpint(overlay->quiesce_counter, ==, 1);
1489*da668aa1SThomas Huth     g_assert_cmpint(overlay_s->drain_count, ==, 1);
1490*da668aa1SThomas Huth 
1491*da668aa1SThomas Huth     do_drain_end(BDRV_DRAIN, base);
1492*da668aa1SThomas Huth 
1493*da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 0);
1494*da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 0);
1495*da668aa1SThomas Huth     g_assert_cmpint(overlay->quiesce_counter, ==, 0);
1496*da668aa1SThomas Huth     g_assert_cmpint(overlay_s->drain_count, ==, 0);
1497*da668aa1SThomas Huth 
1498*da668aa1SThomas Huth     bdrv_unref(base);
1499*da668aa1SThomas Huth     blk_unref(blk);
1500*da668aa1SThomas Huth }
1501*da668aa1SThomas Huth 
1502*da668aa1SThomas Huth static void test_set_aio_context(void)
1503*da668aa1SThomas Huth {
1504*da668aa1SThomas Huth     BlockDriverState *bs;
1505*da668aa1SThomas Huth     IOThread *a = iothread_new();
1506*da668aa1SThomas Huth     IOThread *b = iothread_new();
1507*da668aa1SThomas Huth     AioContext *ctx_a = iothread_get_aio_context(a);
1508*da668aa1SThomas Huth     AioContext *ctx_b = iothread_get_aio_context(b);
1509*da668aa1SThomas Huth 
1510*da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
1511*da668aa1SThomas Huth                               &error_abort);
1512*da668aa1SThomas Huth 
1513*da668aa1SThomas Huth     bdrv_drained_begin(bs);
1514*da668aa1SThomas Huth     bdrv_try_set_aio_context(bs, ctx_a, &error_abort);
1515*da668aa1SThomas Huth 
1516*da668aa1SThomas Huth     aio_context_acquire(ctx_a);
1517*da668aa1SThomas Huth     bdrv_drained_end(bs);
1518*da668aa1SThomas Huth 
1519*da668aa1SThomas Huth     bdrv_drained_begin(bs);
1520*da668aa1SThomas Huth     bdrv_try_set_aio_context(bs, ctx_b, &error_abort);
1521*da668aa1SThomas Huth     aio_context_release(ctx_a);
1522*da668aa1SThomas Huth     aio_context_acquire(ctx_b);
1523*da668aa1SThomas Huth     bdrv_try_set_aio_context(bs, qemu_get_aio_context(), &error_abort);
1524*da668aa1SThomas Huth     aio_context_release(ctx_b);
1525*da668aa1SThomas Huth     bdrv_drained_end(bs);
1526*da668aa1SThomas Huth 
1527*da668aa1SThomas Huth     bdrv_unref(bs);
1528*da668aa1SThomas Huth     iothread_join(a);
1529*da668aa1SThomas Huth     iothread_join(b);
1530*da668aa1SThomas Huth }
1531*da668aa1SThomas Huth 
1532*da668aa1SThomas Huth 
1533*da668aa1SThomas Huth typedef struct TestDropBackingBlockJob {
1534*da668aa1SThomas Huth     BlockJob common;
1535*da668aa1SThomas Huth     bool should_complete;
1536*da668aa1SThomas Huth     bool *did_complete;
1537*da668aa1SThomas Huth     BlockDriverState *detach_also;
1538*da668aa1SThomas Huth } TestDropBackingBlockJob;
1539*da668aa1SThomas Huth 
1540*da668aa1SThomas Huth static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
1541*da668aa1SThomas Huth {
1542*da668aa1SThomas Huth     TestDropBackingBlockJob *s =
1543*da668aa1SThomas Huth         container_of(job, TestDropBackingBlockJob, common.job);
1544*da668aa1SThomas Huth 
1545*da668aa1SThomas Huth     while (!s->should_complete) {
1546*da668aa1SThomas Huth         job_sleep_ns(job, 0);
1547*da668aa1SThomas Huth     }
1548*da668aa1SThomas Huth 
1549*da668aa1SThomas Huth     return 0;
1550*da668aa1SThomas Huth }
1551*da668aa1SThomas Huth 
1552*da668aa1SThomas Huth static void test_drop_backing_job_commit(Job *job)
1553*da668aa1SThomas Huth {
1554*da668aa1SThomas Huth     TestDropBackingBlockJob *s =
1555*da668aa1SThomas Huth         container_of(job, TestDropBackingBlockJob, common.job);
1556*da668aa1SThomas Huth 
1557*da668aa1SThomas Huth     bdrv_set_backing_hd(blk_bs(s->common.blk), NULL, &error_abort);
1558*da668aa1SThomas Huth     bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);
1559*da668aa1SThomas Huth 
1560*da668aa1SThomas Huth     *s->did_complete = true;
1561*da668aa1SThomas Huth }
1562*da668aa1SThomas Huth 
1563*da668aa1SThomas Huth static const BlockJobDriver test_drop_backing_job_driver = {
1564*da668aa1SThomas Huth     .job_driver = {
1565*da668aa1SThomas Huth         .instance_size  = sizeof(TestDropBackingBlockJob),
1566*da668aa1SThomas Huth         .free           = block_job_free,
1567*da668aa1SThomas Huth         .user_resume    = block_job_user_resume,
1568*da668aa1SThomas Huth         .run            = test_drop_backing_job_run,
1569*da668aa1SThomas Huth         .commit         = test_drop_backing_job_commit,
1570*da668aa1SThomas Huth     }
1571*da668aa1SThomas Huth };
1572*da668aa1SThomas Huth 
1573*da668aa1SThomas Huth /**
1574*da668aa1SThomas Huth  * Creates a child node with three parent nodes on it, and then runs a
1575*da668aa1SThomas Huth  * block job on the final one, parent-node-2.
1576*da668aa1SThomas Huth  *
1577*da668aa1SThomas Huth  * The job is then asked to complete before a section where the child
1578*da668aa1SThomas Huth  * is drained.
1579*da668aa1SThomas Huth  *
1580*da668aa1SThomas Huth  * Ending this section will undrain the child's parents, first
1581*da668aa1SThomas Huth  * parent-node-2, then parent-node-1, then parent-node-0 -- the parent
1582*da668aa1SThomas Huth  * list is in reverse order of how they were added.  Ending the drain
1583*da668aa1SThomas Huth  * on parent-node-2 will resume the job, thus completing it and
1584*da668aa1SThomas Huth  * scheduling job_exit().
1585*da668aa1SThomas Huth  *
1586*da668aa1SThomas Huth  * Ending the drain on parent-node-1 will poll the AioContext, which
1587*da668aa1SThomas Huth  * lets job_exit() and thus test_drop_backing_job_commit() run.  That
1588*da668aa1SThomas Huth  * function first removes the child as parent-node-2's backing file.
1589*da668aa1SThomas Huth  *
1590*da668aa1SThomas Huth  * In old (and buggy) implementations, there are two problems with
1591*da668aa1SThomas Huth  * that:
1592*da668aa1SThomas Huth  * (A) bdrv_drain_invoke() polls for every node that leaves the
1593*da668aa1SThomas Huth  *     drained section.  This means that job_exit() is scheduled
1594*da668aa1SThomas Huth  *     before the child has left the drained section.  Its
1595*da668aa1SThomas Huth  *     quiesce_counter is therefore still 1 when it is removed from
1596*da668aa1SThomas Huth  *     parent-node-2.
1597*da668aa1SThomas Huth  *
1598*da668aa1SThomas Huth  * (B) bdrv_replace_child_noperm() calls drained_end() on the old
1599*da668aa1SThomas Huth  *     child's parents as many times as the child is quiesced.  This
1600*da668aa1SThomas Huth  *     means it will call drained_end() on parent-node-2 once.
1601*da668aa1SThomas Huth  *     Because parent-node-2 is no longer quiesced at this point, this
1602*da668aa1SThomas Huth  *     will fail.
1603*da668aa1SThomas Huth  *
1604*da668aa1SThomas Huth  * bdrv_replace_child_noperm() therefore must call drained_end() on
1605*da668aa1SThomas Huth  * the parent only if it really is still drained because the child is
1606*da668aa1SThomas Huth  * drained.
1607*da668aa1SThomas Huth  *
1608*da668aa1SThomas Huth  * If removing child from parent-node-2 was successful (as it should
1609*da668aa1SThomas Huth  * be), test_drop_backing_job_commit() will then also remove the child
1610*da668aa1SThomas Huth  * from parent-node-0.
1611*da668aa1SThomas Huth  *
1612*da668aa1SThomas Huth  * With an old version of our drain infrastructure ((A) above), that
1613*da668aa1SThomas Huth  * resulted in the following flow:
1614*da668aa1SThomas Huth  *
1615*da668aa1SThomas Huth  * 1. child attempts to leave its drained section.  The call recurses
1616*da668aa1SThomas Huth  *    to its parents.
1617*da668aa1SThomas Huth  *
1618*da668aa1SThomas Huth  * 2. parent-node-2 leaves the drained section.  Polling in
1619*da668aa1SThomas Huth  *    bdrv_drain_invoke() will schedule job_exit().
1620*da668aa1SThomas Huth  *
1621*da668aa1SThomas Huth  * 3. parent-node-1 leaves the drained section.  Polling in
1622*da668aa1SThomas Huth  *    bdrv_drain_invoke() will run job_exit(), thus disconnecting
1623*da668aa1SThomas Huth  *    parent-node-0 from the child node.
1624*da668aa1SThomas Huth  *
1625*da668aa1SThomas Huth  * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to
1626*da668aa1SThomas Huth  *    iterate over the parents.  Thus, it now accesses the BdrvChild
1627*da668aa1SThomas Huth  *    object that used to connect parent-node-0 and the child node.
1628*da668aa1SThomas Huth  *    However, that object no longer exists, so it accesses a dangling
1629*da668aa1SThomas Huth  *    pointer.
1630*da668aa1SThomas Huth  *
1631*da668aa1SThomas Huth  * The solution is to only poll once when running a bdrv_drained_end()
1632*da668aa1SThomas Huth  * operation, specifically at the end when all drained_end()
1633*da668aa1SThomas Huth  * operations for all involved nodes have been scheduled.
1634*da668aa1SThomas Huth  * Note that this also solves (A) above, thus hiding (B).
1635*da668aa1SThomas Huth  */
1636*da668aa1SThomas Huth static void test_blockjob_commit_by_drained_end(void)
1637*da668aa1SThomas Huth {
1638*da668aa1SThomas Huth     BlockDriverState *bs_child, *bs_parents[3];
1639*da668aa1SThomas Huth     TestDropBackingBlockJob *job;
1640*da668aa1SThomas Huth     bool job_has_completed = false;
1641*da668aa1SThomas Huth     int i;
1642*da668aa1SThomas Huth 
1643*da668aa1SThomas Huth     bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR,
1644*da668aa1SThomas Huth                                     &error_abort);
1645*da668aa1SThomas Huth 
1646*da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1647*da668aa1SThomas Huth         char name[32];
1648*da668aa1SThomas Huth         snprintf(name, sizeof(name), "parent-node-%i", i);
1649*da668aa1SThomas Huth         bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
1650*da668aa1SThomas Huth                                              &error_abort);
1651*da668aa1SThomas Huth         bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort);
1652*da668aa1SThomas Huth     }
1653*da668aa1SThomas Huth 
1654*da668aa1SThomas Huth     job = block_job_create("job", &test_drop_backing_job_driver, NULL,
1655*da668aa1SThomas Huth                            bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
1656*da668aa1SThomas Huth                            &error_abort);
1657*da668aa1SThomas Huth 
1658*da668aa1SThomas Huth     job->detach_also = bs_parents[0];
1659*da668aa1SThomas Huth     job->did_complete = &job_has_completed;
1660*da668aa1SThomas Huth 
1661*da668aa1SThomas Huth     job_start(&job->common.job);
1662*da668aa1SThomas Huth 
1663*da668aa1SThomas Huth     job->should_complete = true;
1664*da668aa1SThomas Huth     bdrv_drained_begin(bs_child);
1665*da668aa1SThomas Huth     g_assert(!job_has_completed);
1666*da668aa1SThomas Huth     bdrv_drained_end(bs_child);
1667*da668aa1SThomas Huth     g_assert(job_has_completed);
1668*da668aa1SThomas Huth 
1669*da668aa1SThomas Huth     bdrv_unref(bs_parents[0]);
1670*da668aa1SThomas Huth     bdrv_unref(bs_parents[1]);
1671*da668aa1SThomas Huth     bdrv_unref(bs_parents[2]);
1672*da668aa1SThomas Huth     bdrv_unref(bs_child);
1673*da668aa1SThomas Huth }
1674*da668aa1SThomas Huth 
1675*da668aa1SThomas Huth 
1676*da668aa1SThomas Huth typedef struct TestSimpleBlockJob {
1677*da668aa1SThomas Huth     BlockJob common;
1678*da668aa1SThomas Huth     bool should_complete;
1679*da668aa1SThomas Huth     bool *did_complete;
1680*da668aa1SThomas Huth } TestSimpleBlockJob;
1681*da668aa1SThomas Huth 
1682*da668aa1SThomas Huth static int coroutine_fn test_simple_job_run(Job *job, Error **errp)
1683*da668aa1SThomas Huth {
1684*da668aa1SThomas Huth     TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1685*da668aa1SThomas Huth 
1686*da668aa1SThomas Huth     while (!s->should_complete) {
1687*da668aa1SThomas Huth         job_sleep_ns(job, 0);
1688*da668aa1SThomas Huth     }
1689*da668aa1SThomas Huth 
1690*da668aa1SThomas Huth     return 0;
1691*da668aa1SThomas Huth }
1692*da668aa1SThomas Huth 
1693*da668aa1SThomas Huth static void test_simple_job_clean(Job *job)
1694*da668aa1SThomas Huth {
1695*da668aa1SThomas Huth     TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1696*da668aa1SThomas Huth     *s->did_complete = true;
1697*da668aa1SThomas Huth }
1698*da668aa1SThomas Huth 
/* Minimal block job driver: runs until told to complete, reports via .clean() */
static const BlockJobDriver test_simple_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestSimpleBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_simple_job_run,
        .clean          = test_simple_job_clean,
    },
};
1708*da668aa1SThomas Huth 
1709*da668aa1SThomas Huth static int drop_intermediate_poll_update_filename(BdrvChild *child,
1710*da668aa1SThomas Huth                                                   BlockDriverState *new_base,
1711*da668aa1SThomas Huth                                                   const char *filename,
1712*da668aa1SThomas Huth                                                   Error **errp)
1713*da668aa1SThomas Huth {
1714*da668aa1SThomas Huth     /*
1715*da668aa1SThomas Huth      * We are free to poll here, which may change the block graph, if
1716*da668aa1SThomas Huth      * it is not drained.
1717*da668aa1SThomas Huth      */
1718*da668aa1SThomas Huth 
1719*da668aa1SThomas Huth     /* If the job is not drained: Complete it, schedule job_exit() */
1720*da668aa1SThomas Huth     aio_poll(qemu_get_current_aio_context(), false);
1721*da668aa1SThomas Huth     /* If the job is not drained: Run job_exit(), finish the job */
1722*da668aa1SThomas Huth     aio_poll(qemu_get_current_aio_context(), false);
1723*da668aa1SThomas Huth 
1724*da668aa1SThomas Huth     return 0;
1725*da668aa1SThomas Huth }
1726*da668aa1SThomas Huth 
1727*da668aa1SThomas Huth /**
1728*da668aa1SThomas Huth  * Test a poll in the midst of bdrv_drop_intermediate().
1729*da668aa1SThomas Huth  *
1730*da668aa1SThomas Huth  * bdrv_drop_intermediate() calls BdrvChildClass.update_filename(),
1731*da668aa1SThomas Huth  * which can yield or poll.  This may lead to graph changes, unless
1732*da668aa1SThomas Huth  * the whole subtree in question is drained.
1733*da668aa1SThomas Huth  *
1734*da668aa1SThomas Huth  * We test this on the following graph:
1735*da668aa1SThomas Huth  *
1736*da668aa1SThomas Huth  *                    Job
1737*da668aa1SThomas Huth  *
1738*da668aa1SThomas Huth  *                     |
1739*da668aa1SThomas Huth  *                  job-node
1740*da668aa1SThomas Huth  *                     |
1741*da668aa1SThomas Huth  *                     v
1742*da668aa1SThomas Huth  *
1743*da668aa1SThomas Huth  *                  job-node
1744*da668aa1SThomas Huth  *
1745*da668aa1SThomas Huth  *                     |
1746*da668aa1SThomas Huth  *                  backing
1747*da668aa1SThomas Huth  *                     |
1748*da668aa1SThomas Huth  *                     v
1749*da668aa1SThomas Huth  *
1750*da668aa1SThomas Huth  * node-2 --chain--> node-1 --chain--> node-0
1751*da668aa1SThomas Huth  *
1752*da668aa1SThomas Huth  * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0).
1753*da668aa1SThomas Huth  *
1754*da668aa1SThomas Huth  * This first updates node-2's backing filename by invoking
1755*da668aa1SThomas Huth  * drop_intermediate_poll_update_filename(), which polls twice.  This
 * causes the job to finish, which in turn causes the job-node to be
1757*da668aa1SThomas Huth  * deleted.
1758*da668aa1SThomas Huth  *
1759*da668aa1SThomas Huth  * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it
1760*da668aa1SThomas Huth  * already has a pointer to the BdrvChild edge between job-node and
1761*da668aa1SThomas Huth  * node-1.  When it tries to handle that edge, we probably get a
1762*da668aa1SThomas Huth  * segmentation fault because the object no longer exists.
1763*da668aa1SThomas Huth  *
1764*da668aa1SThomas Huth  *
1765*da668aa1SThomas Huth  * The solution is for bdrv_drop_intermediate() to drain top's
1766*da668aa1SThomas Huth  * subtree.  This prevents graph changes from happening just because
1767*da668aa1SThomas Huth  * BdrvChildClass.update_filename() yields or polls.  Thus, the block
1768*da668aa1SThomas Huth  * job is paused during that drained section and must finish before or
1769*da668aa1SThomas Huth  * after.
1770*da668aa1SThomas Huth  *
1771*da668aa1SThomas Huth  * (In addition, bdrv_replace_child() must keep the job paused.)
1772*da668aa1SThomas Huth  */
1773*da668aa1SThomas Huth static void test_drop_intermediate_poll(void)
1774*da668aa1SThomas Huth {
1775*da668aa1SThomas Huth     static BdrvChildClass chain_child_class;
1776*da668aa1SThomas Huth     BlockDriverState *chain[3];
1777*da668aa1SThomas Huth     TestSimpleBlockJob *job;
1778*da668aa1SThomas Huth     BlockDriverState *job_node;
1779*da668aa1SThomas Huth     bool job_has_completed = false;
1780*da668aa1SThomas Huth     int i;
1781*da668aa1SThomas Huth     int ret;
1782*da668aa1SThomas Huth 
1783*da668aa1SThomas Huth     chain_child_class = child_of_bds;
1784*da668aa1SThomas Huth     chain_child_class.update_filename = drop_intermediate_poll_update_filename;
1785*da668aa1SThomas Huth 
1786*da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1787*da668aa1SThomas Huth         char name[32];
1788*da668aa1SThomas Huth         snprintf(name, 32, "node-%i", i);
1789*da668aa1SThomas Huth 
1790*da668aa1SThomas Huth         chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort);
1791*da668aa1SThomas Huth     }
1792*da668aa1SThomas Huth 
1793*da668aa1SThomas Huth     job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR,
1794*da668aa1SThomas Huth                                     &error_abort);
1795*da668aa1SThomas Huth     bdrv_set_backing_hd(job_node, chain[1], &error_abort);
1796*da668aa1SThomas Huth 
1797*da668aa1SThomas Huth     /*
1798*da668aa1SThomas Huth      * Establish the chain last, so the chain links are the first
1799*da668aa1SThomas Huth      * elements in the BDS.parents lists
1800*da668aa1SThomas Huth      */
1801*da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1802*da668aa1SThomas Huth         if (i) {
1803*da668aa1SThomas Huth             /* Takes the reference to chain[i - 1] */
1804*da668aa1SThomas Huth             chain[i]->backing = bdrv_attach_child(chain[i], chain[i - 1],
1805*da668aa1SThomas Huth                                                   "chain", &chain_child_class,
1806*da668aa1SThomas Huth                                                   BDRV_CHILD_COW, &error_abort);
1807*da668aa1SThomas Huth         }
1808*da668aa1SThomas Huth     }
1809*da668aa1SThomas Huth 
1810*da668aa1SThomas Huth     job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
1811*da668aa1SThomas Huth                            0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
1812*da668aa1SThomas Huth 
1813*da668aa1SThomas Huth     /* The job has a reference now */
1814*da668aa1SThomas Huth     bdrv_unref(job_node);
1815*da668aa1SThomas Huth 
1816*da668aa1SThomas Huth     job->did_complete = &job_has_completed;
1817*da668aa1SThomas Huth 
1818*da668aa1SThomas Huth     job_start(&job->common.job);
1819*da668aa1SThomas Huth     job->should_complete = true;
1820*da668aa1SThomas Huth 
1821*da668aa1SThomas Huth     g_assert(!job_has_completed);
1822*da668aa1SThomas Huth     ret = bdrv_drop_intermediate(chain[1], chain[0], NULL);
1823*da668aa1SThomas Huth     g_assert(ret == 0);
1824*da668aa1SThomas Huth     g_assert(job_has_completed);
1825*da668aa1SThomas Huth 
1826*da668aa1SThomas Huth     bdrv_unref(chain[2]);
1827*da668aa1SThomas Huth }
1828*da668aa1SThomas Huth 
1829*da668aa1SThomas Huth 
/* Per-node state of the bdrv_replace_test driver */
typedef struct BDRVReplaceTestState {
    bool was_drained;     /* drain_begin was called while undrained */
    bool was_undrained;   /* drain_end dropped .drain_count back to 0 */
    bool has_read;        /* a read request has completed on this node */

    int drain_count;      /* number of currently active drained sections */

    bool yield_before_read;  /* make the next read yield once before reading */
    Coroutine *io_co;        /* read coroutine currently yielded, if any */
    Coroutine *drain_co;     /* drain_begin coroutine waiting for io_co */
} BDRVReplaceTestState;
1841*da668aa1SThomas Huth 
/* .bdrv_close callback: nothing to clean up for this test driver */
static void bdrv_replace_test_close(BlockDriverState *bs)
{
}
1845*da668aa1SThomas Huth 
/**
 * If @bs has a backing file:
 *   Yield if .yield_before_read is true (and wait for drain_begin to
 *   wake us up).
 *   Forward the read to bs->backing.  Set .has_read to true.
 *   If drain_begin has woken us, wake it in turn.
 *
 * Otherwise:
 *   Set .has_read to true and return success.
 */
static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs,
                                                    uint64_t offset,
                                                    uint64_t bytes,
                                                    QEMUIOVector *qiov,
                                                    int flags)
{
    BDRVReplaceTestState *s = bs->opaque;

    if (bs->backing) {
        int ret;

        /* A drained node must not receive any requests */
        g_assert(!s->drain_count);

        s->io_co = qemu_coroutine_self();
        if (s->yield_before_read) {
            s->yield_before_read = false;
            /* drain_begin resumes us via aio_co_wake() on s->io_co */
            qemu_coroutine_yield();
        }
        /* Clear before reading so drain_begin does not wake us again */
        s->io_co = NULL;

        ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0);
        s->has_read = true;

        /* Wake up drain_co if it runs */
        if (s->drain_co) {
            aio_co_wake(s->drain_co);
        }

        return ret;
    }

    s->has_read = true;
    return 0;
}
1890*da668aa1SThomas Huth 
/**
 * If .drain_count is 0, wake up .io_co if there is one; and set
 * .was_drained.
 * Increment .drain_count.
 */
static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs)
{
    BDRVReplaceTestState *s = bs->opaque;

    if (!s->drain_count) {
        /* Keep waking io_co up until it is done */
        s->drain_co = qemu_coroutine_self();
        while (s->io_co) {
            aio_co_wake(s->io_co);
            s->io_co = NULL;
            /* io_co wakes us again (via drain_co) once its read is done */
            qemu_coroutine_yield();
        }
        s->drain_co = NULL;

        s->was_drained = true;
    }
    s->drain_count++;
}
1914*da668aa1SThomas Huth 
/**
 * Reduce .drain_count, set .was_undrained once it reaches 0.
 * If .drain_count reaches 0 and the node has a backing file, issue a
 * read request.
 */
static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs)
{
    BDRVReplaceTestState *s = bs->opaque;

    /* drain_end calls must be balanced with drain_begin calls */
    g_assert(s->drain_count > 0);
    if (!--s->drain_count) {
        int ret;

        s->was_undrained = true;

        if (bs->backing) {
            char data;
            QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1);

            /* Queue a read request post-drain */
            ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0);
            g_assert(ret >= 0);
        }
    }
}
1940*da668aa1SThomas Huth 
/*
 * Test driver whose reads can yield and which records drain/undrain
 * events in its BDRVReplaceTestState
 */
static BlockDriver bdrv_replace_test = {
    .format_name            = "replace_test",
    .instance_size          = sizeof(BDRVReplaceTestState),

    .bdrv_close             = bdrv_replace_test_close,
    .bdrv_co_preadv         = bdrv_replace_test_co_preadv,

    .bdrv_co_drain_begin    = bdrv_replace_test_co_drain_begin,
    .bdrv_co_drain_end      = bdrv_replace_test_co_drain_end,

    .bdrv_child_perm        = bdrv_default_perms,
};
1953*da668aa1SThomas Huth 
1954*da668aa1SThomas Huth static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque)
1955*da668aa1SThomas Huth {
1956*da668aa1SThomas Huth     int ret;
1957*da668aa1SThomas Huth     char data;
1958*da668aa1SThomas Huth 
1959*da668aa1SThomas Huth     ret = blk_co_pread(opaque, 0, 1, &data, 0);
1960*da668aa1SThomas Huth     g_assert(ret >= 0);
1961*da668aa1SThomas Huth }
1962*da668aa1SThomas Huth 
/**
 * We test two things:
 * (1) bdrv_replace_child_noperm() must not undrain the parent if both
 *     children are drained.
 * (2) bdrv_replace_child_noperm() must never flush I/O requests to a
 *     drained child.  If the old child is drained, it must flush I/O
 *     requests after the new one has been attached.  If the new child
 *     is drained, it must flush I/O requests before the old one is
 *     detached.
 *
 * To do so, we create one parent node and two child nodes; then
 * attach one of the children (old_child_bs) to the parent, then
 * drain both old_child_bs and new_child_bs according to
 * old_drain_count and new_drain_count, respectively, and finally
 * we invoke bdrv_replace_node() to replace old_child_bs by
 * new_child_bs.
 *
 * The test block driver we use here (bdrv_replace_test) has a read
 * function that:
 * - For the parent node, can optionally yield, and then forwards the
 *   read to bdrv_co_preadv(),
 * - For the child node, just returns immediately.
 *
 * If the read yields, the drain_begin function will wake it up.
 *
 * The drain_end function issues a read on the parent once it is fully
 * undrained (which simulates requests starting to come in again).
 */
static void do_test_replace_child_mid_drain(int old_drain_count,
                                            int new_drain_count)
{
    BlockBackend *parent_blk;
    BlockDriverState *parent_bs;
    BlockDriverState *old_child_bs, *new_child_bs;
    BDRVReplaceTestState *parent_s;
    BDRVReplaceTestState *old_child_s, *new_child_s;
    Coroutine *io_co;
    int i;

    parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0,
                                     &error_abort);
    parent_s = parent_bs->opaque;

    parent_blk = blk_new(qemu_get_aio_context(),
                         BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
    blk_insert_bs(parent_blk, parent_bs, &error_abort);

    old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0,
                                        &error_abort);
    new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0,
                                        &error_abort);
    old_child_s = old_child_bs->opaque;
    new_child_s = new_child_bs->opaque;

    /* So that we can read something */
    parent_bs->total_sectors = 1;
    old_child_bs->total_sectors = 1;
    new_child_bs->total_sectors = 1;

    /* bdrv_attach_child() consumes a reference, so take an extra one */
    bdrv_ref(old_child_bs);
    parent_bs->backing = bdrv_attach_child(parent_bs, old_child_bs, "child",
                                           &child_of_bds, BDRV_CHILD_COW,
                                           &error_abort);

    for (i = 0; i < old_drain_count; i++) {
        bdrv_drained_begin(old_child_bs);
    }
    for (i = 0; i < new_drain_count; i++) {
        bdrv_drained_begin(new_child_bs);
    }

    if (!old_drain_count) {
        /*
         * Start a read operation that will yield, so it will not
         * complete before the node is drained.
         */
        parent_s->yield_before_read = true;
        io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co,
                                      parent_blk);
        qemu_coroutine_enter(io_co);
    }

    /* If we have started a read operation, it should have yielded */
    g_assert(!parent_s->has_read);

    /* Reset drained status so we can see what bdrv_replace_node() does */
    parent_s->was_drained = false;
    parent_s->was_undrained = false;

    /* The parent's drain state must track that of its current child */
    g_assert(parent_bs->quiesce_counter == old_drain_count);
    bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
    g_assert(parent_bs->quiesce_counter == new_drain_count);

    if (!old_drain_count && !new_drain_count) {
        /*
         * From undrained to undrained drains and undrains the parent,
         * because bdrv_replace_node() contains a drained section for
         * @old_child_bs.
         */
        g_assert(parent_s->was_drained && parent_s->was_undrained);
    } else if (!old_drain_count && new_drain_count) {
        /*
         * From undrained to drained should drain the parent and keep
         * it that way.
         */
        g_assert(parent_s->was_drained && !parent_s->was_undrained);
    } else if (old_drain_count && !new_drain_count) {
        /*
         * From drained to undrained should undrain the parent and
         * keep it that way.
         */
        g_assert(!parent_s->was_drained && parent_s->was_undrained);
    } else /* if (old_drain_count && new_drain_count) */ {
        /*
         * From drained to drained must not undrain the parent at any
         * point
         */
        g_assert(!parent_s->was_drained && !parent_s->was_undrained);
    }

    if (!old_drain_count || !new_drain_count) {
        /*
         * If !old_drain_count, we have started a read request before
         * bdrv_replace_node().  If !new_drain_count, the parent must
         * have been undrained at some point, and
         * bdrv_replace_test_co_drain_end() starts a read request
         * then.
         */
        g_assert(parent_s->has_read);
    } else {
        /*
         * If the parent was never undrained, there is no way to start
         * a read request.
         */
        g_assert(!parent_s->has_read);
    }

    /* A drained child must have not received any request */
    g_assert(!(old_drain_count && old_child_s->has_read));
    g_assert(!(new_drain_count && new_child_s->has_read));

    for (i = 0; i < new_drain_count; i++) {
        bdrv_drained_end(new_child_bs);
    }
    for (i = 0; i < old_drain_count; i++) {
        bdrv_drained_end(old_child_bs);
    }

    /*
     * By now, bdrv_replace_test_co_drain_end() must have been called
     * at some point while the new child was attached to the parent.
     */
    g_assert(parent_s->has_read);
    g_assert(new_child_s->has_read);

    blk_unref(parent_blk);
    bdrv_unref(parent_bs);
    bdrv_unref(old_child_bs);
    bdrv_unref(new_child_bs);
}
2123*da668aa1SThomas Huth 
/* Exercise all four drained/undrained combinations of old and new child */
static void test_replace_child_mid_drain(void)
{
    int old_count;
    int new_count;

    for (old_count = 0; old_count < 2; old_count++) {
        for (new_count = 0; new_count < 2; new_count++) {
            do_test_replace_child_mid_drain(old_count, new_count);
        }
    }
}
2134*da668aa1SThomas Huth 
/* Test entry point: initialize the block layer, register and run all tests */
int main(int argc, char **argv)
{
    int ret;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);
    qemu_event_init(&done_event, false);

    /* Driver callback invocation during drain */
    g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
    g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
    g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
                    test_drv_cb_drain_subtree);

    g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
                    test_drv_cb_co_drain_all);
    g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
    g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree",
                    test_drv_cb_co_drain_subtree);


    /* Quiesce counter behavior */
    g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
    g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
    g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
                    test_quiesce_drain_subtree);

    g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
                    test_quiesce_co_drain_all);
    g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
    g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree",
                    test_quiesce_co_drain_subtree);

    /* Nested drains and multi-parent graphs */
    g_test_add_func("/bdrv-drain/nested", test_nested);
    g_test_add_func("/bdrv-drain/multiparent", test_multiparent);

    g_test_add_func("/bdrv-drain/graph-change/drain_subtree",
                    test_graph_change_drain_subtree);
    g_test_add_func("/bdrv-drain/graph-change/drain_all",
                    test_graph_change_drain_all);

    /* Draining nodes that live in an I/O thread */
    g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
    g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
    g_test_add_func("/bdrv-drain/iothread/drain_subtree",
                    test_iothread_drain_subtree);

    /* Interaction between drain and block jobs */
    g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
    g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
                    test_blockjob_drain_subtree);

    g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
                    test_blockjob_error_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/error/drain",
                    test_blockjob_error_drain);
    g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree",
                    test_blockjob_error_drain_subtree);

    g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
                    test_blockjob_iothread_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
                    test_blockjob_iothread_drain);
    g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
                    test_blockjob_iothread_drain_subtree);

    g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
                    test_blockjob_iothread_error_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
                    test_blockjob_iothread_error_drain);
    g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree",
                    test_blockjob_iothread_error_drain_subtree);

    /* Graph changes (node deletion/detach) triggered by drain */
    g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
    g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
    g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
    g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
    g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
    g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);

    g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained);

    g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context);

    g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end",
                    test_blockjob_commit_by_drained_end);

    g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll",
                    test_drop_intermediate_poll);

    g_test_add_func("/bdrv-drain/replace_child/mid-drain",
                    test_replace_child_mid_drain);

    ret = g_test_run();
    qemu_event_destroy(&done_event);
    return ret;
}
2231