/*
 * Block node draining tests
 *
 * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "iothread.h"

static QemuEvent done_event;

typedef struct BDRVTestState {
    int drain_count;
    AioContext *bh_indirection_ctx;
    bool sleep_in_drain_begin;
} BDRVTestState;

static void coroutine_fn sleep_in_drain_begin(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
    bdrv_dec_in_flight(bs);
}

static void bdrv_test_drain_begin(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count++;
    if (s->sleep_in_drain_begin) {
        Coroutine *co = qemu_coroutine_create(sleep_in_drain_begin, bs);
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), co);
    }
}

static void bdrv_test_drain_end(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count--;
}

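/*
 * The close callback of the test driver below asserts that the node is
 * drained (drain_count > 0) at the point where .bdrv_close runs.
 */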
static void bdrv_test_close(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    g_assert_cmpint(s->drain_count, >, 0);
}

static void co_reenter_bh(void *opaque)
{
    aio_co_wake(opaque);
}

static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    BDRVTestState *s = bs->opaque;

    /* We want this request to stay until the polling loop in drain waits for
     * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
     * first and polls its result, too, but it shouldn't accidentally complete
     * this request yet. */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);

    if (s->bh_indirection_ctx) {
        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return 0;
}

static int bdrv_test_change_backing_file(BlockDriverState *bs,
                                         const char *backing_file,
                                         const char *backing_fmt)
{
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name = "test",
    .instance_size = sizeof(BDRVTestState),
    .supports_backing = true,

    .bdrv_close = bdrv_test_close,
    .bdrv_co_preadv = bdrv_test_co_preadv,

    .bdrv_drain_begin = bdrv_test_drain_begin,
    .bdrv_drain_end = bdrv_test_drain_end,

    .bdrv_child_perm = bdrv_default_perms,

    .bdrv_change_backing_file = bdrv_test_change_backing_file,
};

static void aio_ret_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
}

typedef struct CallInCoroutineData {
    void (*entry)(void);
    bool done;
} CallInCoroutineData;

static coroutine_fn void call_in_coroutine_entry(void *opaque)
{
    CallInCoroutineData *data = opaque;

    data->entry();
    data->done = true;
}

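/*
 * Helper that runs a test function in coroutine context: it enters the
 * coroutine and polls the main AioContext until the function has finished.
 */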
static void call_in_coroutine(void (*entry)(void))
{
    Coroutine *co;
    CallInCoroutineData data = {
        .entry = entry,
        .done = false,
    };

    co = qemu_coroutine_create(call_in_coroutine_entry, &data);
    qemu_coroutine_enter(co);
    while (!data.done) {
        aio_poll(qemu_get_aio_context(), true);
    }
}

enum drain_type {
    BDRV_DRAIN_ALL,
    BDRV_DRAIN,
    BDRV_SUBTREE_DRAIN,
    DRAIN_TYPE_MAX,
};

static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
    case BDRV_DRAIN: bdrv_drained_begin(bs); break;
    case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break;
    default: g_assert_not_reached();
    }
}

static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
    case BDRV_DRAIN: bdrv_drained_end(bs); break;
    case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break;
    default: g_assert_not_reached();
    }
}

static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_begin(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_end(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

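/*
 * Common body for the driver callback tests: checks that .bdrv_drain_begin
 * and .bdrv_drain_end are each called exactly once per drained section,
 * first with no request pending and then with a read request in flight.
 * @recursive says whether the backing node is expected to be drained too.
 */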
static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    BlockAIOCB *acb;
    int aio_ret;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* Now do the same while a request is pending */
    aio_ret = -EINPROGRESS;
    acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(aio_ret, ==, 0);
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

static void test_drv_cb_drain_all(void)
{
    test_drv_cb_common(BDRV_DRAIN_ALL, true);
}

static void test_drv_cb_drain(void)
{
    test_drv_cb_common(BDRV_DRAIN, false);
}

static void test_drv_cb_drain_subtree(void)
{
    test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
}

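/* The same driver callback checks, with the drained section entered from
 * coroutine context through call_in_coroutine(). */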
static void test_drv_cb_co_drain_all(void)
{
    call_in_coroutine(test_drv_cb_drain_all);
}

static void test_drv_cb_co_drain(void)
{
    call_in_coroutine(test_drv_cb_drain);
}

static void test_drv_cb_co_drain_subtree(void)
{
    call_in_coroutine(test_drv_cb_drain_subtree);
}

static void test_quiesce_common(enum drain_type drain_type, bool recursive)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    bdrv_set_backing_hd(bs, backing, &error_abort);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

static void test_quiesce_drain_all(void)
{
    test_quiesce_common(BDRV_DRAIN_ALL, true);
}

static void test_quiesce_drain(void)
{
    test_quiesce_common(BDRV_DRAIN, false);
}

static void test_quiesce_drain_subtree(void)
{
    test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
}

static void test_quiesce_co_drain_all(void)
{
    call_in_coroutine(test_quiesce_drain_all);
}

static void test_quiesce_co_drain(void)
{
    call_in_coroutine(test_quiesce_drain);
}

static void test_quiesce_co_drain_subtree(void)
{
    call_in_coroutine(test_quiesce_drain_subtree);
}

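/* test_nested() combines every drain type as outer and inner drained section
 * and checks that quiesce_counter and the driver callbacks see the correct
 * nesting depth. */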
static void test_nested(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    enum drain_type outer, inner;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
        for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
            int backing_quiesce = (outer != BDRV_DRAIN) +
                                  (inner != BDRV_DRAIN);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);

            do_drain_begin(outer, bs);
            do_drain_begin(inner, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 2);
            g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
            g_assert_cmpint(s->drain_count, ==, 2);
            g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce);

            do_drain_end(inner, bs);
            do_drain_end(outer, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);
        }
    }

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

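/* Two parents sharing one backing node: a subtree drain on either parent has
 * to quiesce the shared backing node and, through it, the other parent. */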
static void test_multiparent(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b, *backing;
    BDRVTestState *a_s, *b_s, *backing_s;

    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs_a, backing, &error_abort);
    bdrv_set_backing_hd(bs_b, backing, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, 1);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
    g_assert_cmpint(backing->quiesce_counter, ==, 2);
    g_assert_cmpint(a_s->drain_count, ==, 2);
    g_assert_cmpint(b_s->drain_count, ==, 2);
    g_assert_cmpint(backing_s->drain_count, ==, 2);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, 1);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs_a);
    bdrv_unref(bs_b);
    blk_unref(blk_a);
    blk_unref(blk_b);
}

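/* Attach and detach a backing file while both parents sit in (multiple)
 * subtree drained sections; the quiesce and drain counters must follow the
 * graph changes. */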
static void test_graph_change_drain_subtree(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b, *backing;
    BDRVTestState *a_s, *b_s, *backing_s;

    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs_a, backing, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);

    bdrv_set_backing_hd(bs_b, backing, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
    g_assert_cmpint(backing->quiesce_counter, ==, 5);
    g_assert_cmpint(a_s->drain_count, ==, 5);
    g_assert_cmpint(b_s->drain_count, ==, 5);
    g_assert_cmpint(backing_s->drain_count, ==, 5);

    bdrv_set_backing_hd(bs_b, NULL, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
    g_assert_cmpint(backing->quiesce_counter, ==, 3);
    g_assert_cmpint(a_s->drain_count, ==, 3);
    g_assert_cmpint(b_s->drain_count, ==, 2);
    g_assert_cmpint(backing_s->drain_count, ==, 3);

    bdrv_set_backing_hd(bs_b, backing, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
    g_assert_cmpint(backing->quiesce_counter, ==, 5);
    g_assert_cmpint(a_s->drain_count, ==, 5);
    g_assert_cmpint(b_s->drain_count, ==, 5);
    g_assert_cmpint(backing_s->drain_count, ==, 5);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs_a);
    bdrv_unref(bs_b);
    blk_unref(blk_a);
    blk_unref(blk_b);
}

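/* Create and delete nodes while inside bdrv_drain_all_begin/end: nodes that
 * are added during the drained section must start out drained, and all
 * counters must return to zero at the end. */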
static void test_graph_change_drain_all(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b;
    BDRVTestState *a_s, *b_s;

    /* Create node A with a BlockBackend */
    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);

    /* Call bdrv_drain_all_begin() */
    bdrv_drain_all_begin();

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);

    /* Create node B with a BlockBackend */
    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Unref and finally delete node A */
    blk_unref(blk_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    bdrv_unref(bs_a);

    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* End the drained section */
    bdrv_drain_all_end();

    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(qemu_get_aio_context()->external_disable_cnt, ==, 0);

    bdrv_unref(bs_b);
    blk_unref(blk_b);
}

struct test_iothread_data {
    BlockDriverState *bs;
    enum drain_type drain_type;
    int *aio_ret;
};

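/* Run in an iothread by the tests below: performs the drain there and
 * signals done_event once the drained section has ended. */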
static void test_iothread_drain_entry(void *opaque)
{
    struct test_iothread_data *data = opaque;

    aio_context_acquire(bdrv_get_aio_context(data->bs));
    do_drain_begin(data->drain_type, data->bs);
    g_assert_cmpint(*data->aio_ret, ==, 0);
    do_drain_end(data->drain_type, data->bs);
    aio_context_release(bdrv_get_aio_context(data->bs));

    qemu_event_set(&done_event);
}

static void test_iothread_aio_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
    qemu_event_set(&done_event);
}

static void test_iothread_main_thread_bh(void *opaque)
{
    struct test_iothread_data *data = opaque;

    /* Test that the AioContext is not yet locked in a random BH that is
     * executed during drain, otherwise this would deadlock. */
    aio_context_acquire(bdrv_get_aio_context(data->bs));
    bdrv_flush(data->bs);
    aio_context_release(bdrv_get_aio_context(data->bs));
}

/*
 * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
 * The request involves a BH on iothread 2 before it can complete.
 *
 * @drain_thread = 0 means that do_drain_begin/end are called from the main
 * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
 * for this BDS cannot be called from iothread 2 because only the main thread
 * may do cross-AioContext polling.
 */
static void test_iothread_common(enum drain_type drain_type, int drain_thread)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    BDRVTestState *s;
    BlockAIOCB *acb;
    int aio_ret;
    struct test_iothread_data data;

    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    /* bdrv_drain_all() may only be called from the main loop thread */
    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
        goto out;
    }

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);
    blk_set_disable_request_queuing(blk, true);

    blk_set_aio_context(blk, ctx_a, &error_abort);
    aio_context_acquire(ctx_a);

    s->bh_indirection_ctx = ctx_b;

    aio_ret = -EINPROGRESS;
    qemu_event_reset(&done_event);

    if (drain_thread == 0) {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
    } else {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    }
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    aio_context_release(ctx_a);

    data = (struct test_iothread_data) {
        .bs = bs,
        .drain_type = drain_type,
        .aio_ret = &aio_ret,
    };

    switch (drain_thread) {
    case 0:
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);

        /* The request is running in IOThread a. Draining its block device
         * will make sure that it has completed as far as the BDS is concerned,
         * but the drain in this thread can continue immediately after
         * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
         * later. */
        do_drain_begin(drain_type, bs);
        g_assert_cmpint(bs->in_flight, ==, 0);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        qemu_event_wait(&done_event);
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        g_assert_cmpint(aio_ret, ==, 0);
        do_drain_end(drain_type, bs);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        break;
    case 1:
        aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
        qemu_event_wait(&done_event);
        break;
    default:
        g_assert_not_reached();
    }

    aio_context_acquire(ctx_a);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx_a);

    bdrv_unref(bs);
    blk_unref(blk);

out:
    iothread_join(a);
    iothread_join(b);
}

static void test_iothread_drain_all(void)
{
    test_iothread_common(BDRV_DRAIN_ALL, 0);
    test_iothread_common(BDRV_DRAIN_ALL, 1);
}

static void test_iothread_drain(void)
{
    test_iothread_common(BDRV_DRAIN, 0);
    test_iothread_common(BDRV_DRAIN, 1);
}

static void test_iothread_drain_subtree(void)
{
    test_iothread_common(BDRV_SUBTREE_DRAIN, 0);
    test_iothread_common(BDRV_SUBTREE_DRAIN, 1);
}


typedef struct TestBlockJob {
    BlockJob common;
    BlockDriverState *bs;
    int run_ret;
    int prepare_ret;
    bool running;
    bool should_complete;
} TestBlockJob;

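/* Driver callbacks for the test block job. prepare/commit/abort each provoke
 * an AIO_WAIT_WHILE() via bdrv_flush() to verify that completing the job does
 * not deadlock. */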
static int test_job_prepare(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
    return s->prepare_ret;
}

static void test_job_commit(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static void test_job_abort(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* We are running the actual job code past the pause point in
     * job_co_entry(). */
    s->running = true;

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that drain
         * has to wait for this activity to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    return s->run_ret;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size = sizeof(TestBlockJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = test_job_run,
        .complete      = test_job_complete,
        .prepare       = test_job_prepare,
        .commit        = test_job_commit,
        .abort         = test_job_abort,
    },
};

enum test_job_result {
    TEST_JOB_SUCCESS,
    TEST_JOB_FAIL_RUN,
    TEST_JOB_FAIL_PREPARE,
};

enum test_job_drain_node {
    TEST_JOB_DRAIN_SRC,
    TEST_JOB_DRAIN_SRC_CHILD,
    TEST_JOB_DRAIN_SRC_PARENT,
};

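/* Core of the block job drain tests: start a test job on the source node
 * (optionally in an iothread), drain @drain_bs and then the target, and check
 * that the job is paused while drained and resumes afterwards. */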
static void test_blockjob_common_drain_node(enum drain_type drain_type,
                                            bool use_iothread,
                                            enum test_job_result result,
                                            enum test_job_drain_node drain_node)
{
    BlockBackend *blk_src, *blk_target;
    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
    BlockJob *job;
    TestBlockJob *tjob;
    IOThread *iothread = NULL;
    AioContext *ctx;
    int ret;

    src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                               &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
                                       BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
                                       BDRV_O_RDWR, &error_abort);

    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);

    blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_src, src_overlay, &error_abort);

    switch (drain_node) {
    case TEST_JOB_DRAIN_SRC:
        drain_bs = src;
        break;
    case TEST_JOB_DRAIN_SRC_CHILD:
        drain_bs = src_backing;
        break;
    case TEST_JOB_DRAIN_SRC_PARENT:
        drain_bs = src_overlay;
        break;
    default:
        g_assert_not_reached();
    }

    if (use_iothread) {
        iothread = iothread_new();
        ctx = iothread_get_aio_context(iothread);
        blk_set_aio_context(blk_src, ctx, &error_abort);
    } else {
        ctx = qemu_get_aio_context();
    }

    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);
    blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_target, target, &error_abort);
    blk_set_allow_aio_context_change(blk_target, true);

    aio_context_acquire(ctx);
    tjob = block_job_create("job0", &test_job_driver, NULL, src,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    tjob->bs = src;
    job = &tjob->common;
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);

    switch (result) {
    case TEST_JOB_SUCCESS:
        break;
    case TEST_JOB_FAIL_RUN:
        tjob->run_ret = -EIO;
        break;
    case TEST_JOB_FAIL_PREPARE:
        tjob->prepare_ret = -EIO;
        break;
    }
    aio_context_release(ctx);

    job_start(&job->job);

    if (use_iothread) {
        /* job_co_entry() is run in the I/O thread, wait for the actual job
         * code to start (we don't want to catch the job in the pause point in
         * job_co_entry()). */
        while (!tjob->running) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(tjob->running);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, drain_bs);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, drain_bs);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, target);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, target);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    WITH_JOB_LOCK_GUARD() {
        ret = job_complete_sync_locked(&job->job, &error_abort);
    }
    g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

    aio_context_acquire(ctx);
    if (use_iothread) {
        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
        assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
    }
    aio_context_release(ctx);

    blk_unref(blk_src);
    blk_unref(blk_target);
    bdrv_unref(src_overlay);
    bdrv_unref(target);

    if (iothread) {
        iothread_join(iothread);
    }
}

static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
                                 enum test_job_result result)
{
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC);
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC_CHILD);
    if (drain_type == BDRV_SUBTREE_DRAIN) {
        test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                        TEST_JOB_DRAIN_SRC_PARENT);
    }
}

static void test_blockjob_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}


typedef struct BDRVTestTopState {
    BdrvChild *wait_child;
} BDRVTestTopState;

static void bdrv_test_top_close(BlockDriverState *bs)
{
    BdrvChild *c, *next_c;
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_unref_child(bs, c);
    }
}

static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
                                                int64_t offset, int64_t bytes,
                                                QEMUIOVector *qiov,
                                                BdrvRequestFlags flags)
{
    BDRVTestTopState *tts = bs->opaque;
    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
}

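/* The "test_top_driver" node forwards reads to the child stored in
 * wait_child, so a request issued on the top node stalls in the child test
 * driver until a drain occurs. */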
Huth } 1151da668aa1SThomas Huth 1152da668aa1SThomas Huth static BlockDriver bdrv_test_top_driver = { 1153da668aa1SThomas Huth .format_name = "test_top_driver", 1154da668aa1SThomas Huth .instance_size = sizeof(BDRVTestTopState), 1155da668aa1SThomas Huth 1156da668aa1SThomas Huth .bdrv_close = bdrv_test_top_close, 1157da668aa1SThomas Huth .bdrv_co_preadv = bdrv_test_top_co_preadv, 1158da668aa1SThomas Huth 1159da668aa1SThomas Huth .bdrv_child_perm = bdrv_default_perms, 1160da668aa1SThomas Huth }; 1161da668aa1SThomas Huth 1162da668aa1SThomas Huth typedef struct TestCoDeleteByDrainData { 1163da668aa1SThomas Huth BlockBackend *blk; 1164da668aa1SThomas Huth bool detach_instead_of_delete; 1165da668aa1SThomas Huth bool done; 1166da668aa1SThomas Huth } TestCoDeleteByDrainData; 1167da668aa1SThomas Huth 1168da668aa1SThomas Huth static void coroutine_fn test_co_delete_by_drain(void *opaque) 1169da668aa1SThomas Huth { 1170da668aa1SThomas Huth TestCoDeleteByDrainData *dbdd = opaque; 1171da668aa1SThomas Huth BlockBackend *blk = dbdd->blk; 1172da668aa1SThomas Huth BlockDriverState *bs = blk_bs(blk); 1173da668aa1SThomas Huth BDRVTestTopState *tts = bs->opaque; 1174da668aa1SThomas Huth void *buffer = g_malloc(65536); 1175da668aa1SThomas Huth QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536); 1176da668aa1SThomas Huth 1177da668aa1SThomas Huth /* Pretend some internal write operation from parent to child. 1178da668aa1SThomas Huth * Important: We have to read from the child, not from the parent! 1179da668aa1SThomas Huth * Draining works by first propagating it all up the tree to the 1180da668aa1SThomas Huth * root and then waiting for drainage from root to the leaves 1181da668aa1SThomas Huth * (protocol nodes). If we have a request waiting on the root, 1182da668aa1SThomas Huth * everything will be drained before we go back down the tree, but 1183da668aa1SThomas Huth * we do not want that. We want to be in the middle of draining 1184da668aa1SThomas Huth * when this following requests returns. */ 1185da668aa1SThomas Huth bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0); 1186da668aa1SThomas Huth 1187da668aa1SThomas Huth g_assert_cmpint(bs->refcnt, ==, 1); 1188da668aa1SThomas Huth 1189da668aa1SThomas Huth if (!dbdd->detach_instead_of_delete) { 1190da668aa1SThomas Huth blk_unref(blk); 1191da668aa1SThomas Huth } else { 1192da668aa1SThomas Huth BdrvChild *c, *next_c; 1193da668aa1SThomas Huth QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { 1194da668aa1SThomas Huth bdrv_unref_child(bs, c); 1195da668aa1SThomas Huth } 1196da668aa1SThomas Huth } 1197da668aa1SThomas Huth 1198da668aa1SThomas Huth dbdd->done = true; 1199da668aa1SThomas Huth g_free(buffer); 1200da668aa1SThomas Huth } 1201da668aa1SThomas Huth 1202da668aa1SThomas Huth /** 1203da668aa1SThomas Huth * Test what happens when some BDS has some children, you drain one of 1204da668aa1SThomas Huth * them and this results in the BDS being deleted. 1205da668aa1SThomas Huth * 1206da668aa1SThomas Huth * If @detach_instead_of_delete is set, the BDS is not going to be 1207da668aa1SThomas Huth * deleted but will only detach all of its children. 
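 *
 * @drain_type selects how the drained section is entered (plain
 * bdrv_drain() on the child, a subtree drain on the parent, or
 * bdrv_drain_all()); see the switch statement below.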
1208da668aa1SThomas Huth */ 1209da668aa1SThomas Huth static void do_test_delete_by_drain(bool detach_instead_of_delete, 1210da668aa1SThomas Huth enum drain_type drain_type) 1211da668aa1SThomas Huth { 1212da668aa1SThomas Huth BlockBackend *blk; 1213da668aa1SThomas Huth BlockDriverState *bs, *child_bs, *null_bs; 1214da668aa1SThomas Huth BDRVTestTopState *tts; 1215da668aa1SThomas Huth TestCoDeleteByDrainData dbdd; 1216da668aa1SThomas Huth Coroutine *co; 1217da668aa1SThomas Huth 1218da668aa1SThomas Huth bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR, 1219da668aa1SThomas Huth &error_abort); 1220da668aa1SThomas Huth bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; 1221da668aa1SThomas Huth tts = bs->opaque; 1222da668aa1SThomas Huth 1223da668aa1SThomas Huth null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, 1224da668aa1SThomas Huth &error_abort); 1225da668aa1SThomas Huth bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, 1226da668aa1SThomas Huth BDRV_CHILD_DATA, &error_abort); 1227da668aa1SThomas Huth 1228da668aa1SThomas Huth /* This child will be the one to pass to requests through to, and 1229da668aa1SThomas Huth * it will stall until a drain occurs */ 1230da668aa1SThomas Huth child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR, 1231da668aa1SThomas Huth &error_abort); 1232da668aa1SThomas Huth child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; 1233da668aa1SThomas Huth /* Takes our reference to child_bs */ 1234da668aa1SThomas Huth tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", 1235da668aa1SThomas Huth &child_of_bds, 1236da668aa1SThomas Huth BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY, 1237da668aa1SThomas Huth &error_abort); 1238da668aa1SThomas Huth 1239da668aa1SThomas Huth /* This child is just there to be deleted 1240da668aa1SThomas Huth * (for detach_instead_of_delete == true) */ 1241da668aa1SThomas Huth null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, 1242da668aa1SThomas Huth &error_abort); 1243da668aa1SThomas Huth bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, 1244da668aa1SThomas Huth &error_abort); 1245da668aa1SThomas Huth 1246da668aa1SThomas Huth blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); 1247da668aa1SThomas Huth blk_insert_bs(blk, bs, &error_abort); 1248da668aa1SThomas Huth 1249da668aa1SThomas Huth /* Referenced by blk now */ 1250da668aa1SThomas Huth bdrv_unref(bs); 1251da668aa1SThomas Huth 1252da668aa1SThomas Huth g_assert_cmpint(bs->refcnt, ==, 1); 1253da668aa1SThomas Huth g_assert_cmpint(child_bs->refcnt, ==, 1); 1254da668aa1SThomas Huth g_assert_cmpint(null_bs->refcnt, ==, 1); 1255da668aa1SThomas Huth 1256da668aa1SThomas Huth 1257da668aa1SThomas Huth dbdd = (TestCoDeleteByDrainData){ 1258da668aa1SThomas Huth .blk = blk, 1259da668aa1SThomas Huth .detach_instead_of_delete = detach_instead_of_delete, 1260da668aa1SThomas Huth .done = false, 1261da668aa1SThomas Huth }; 1262da668aa1SThomas Huth co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd); 1263da668aa1SThomas Huth qemu_coroutine_enter(co); 1264da668aa1SThomas Huth 1265da668aa1SThomas Huth /* Drain the child while the read operation is still pending. 1266da668aa1SThomas Huth * This should result in the operation finishing and 1267da668aa1SThomas Huth * test_co_delete_by_drain() resuming. Thus, @bs will be deleted 1268da668aa1SThomas Huth * and the coroutine will exit while this drain operation is still 1269da668aa1SThomas Huth * in progress. 
*/ 1270da668aa1SThomas Huth switch (drain_type) { 1271da668aa1SThomas Huth case BDRV_DRAIN: 1272da668aa1SThomas Huth bdrv_ref(child_bs); 1273da668aa1SThomas Huth bdrv_drain(child_bs); 1274da668aa1SThomas Huth bdrv_unref(child_bs); 1275da668aa1SThomas Huth break; 1276da668aa1SThomas Huth case BDRV_SUBTREE_DRAIN: 1277da668aa1SThomas Huth /* Would have to ref/unref bs here for !detach_instead_of_delete, but 1278da668aa1SThomas Huth * then the whole test becomes pointless because the graph changes 1279da668aa1SThomas Huth * don't occur during the drain any more. */ 1280da668aa1SThomas Huth assert(detach_instead_of_delete); 1281da668aa1SThomas Huth bdrv_subtree_drained_begin(bs); 1282da668aa1SThomas Huth bdrv_subtree_drained_end(bs); 1283da668aa1SThomas Huth break; 1284da668aa1SThomas Huth case BDRV_DRAIN_ALL: 1285da668aa1SThomas Huth bdrv_drain_all_begin(); 1286da668aa1SThomas Huth bdrv_drain_all_end(); 1287da668aa1SThomas Huth break; 1288da668aa1SThomas Huth default: 1289da668aa1SThomas Huth g_assert_not_reached(); 1290da668aa1SThomas Huth } 1291da668aa1SThomas Huth 1292da668aa1SThomas Huth while (!dbdd.done) { 1293da668aa1SThomas Huth aio_poll(qemu_get_aio_context(), true); 1294da668aa1SThomas Huth } 1295da668aa1SThomas Huth 1296da668aa1SThomas Huth if (detach_instead_of_delete) { 1297da668aa1SThomas Huth /* Here, the reference has not passed over to the coroutine, 1298da668aa1SThomas Huth * so we have to delete the BB ourselves */ 1299da668aa1SThomas Huth blk_unref(blk); 1300da668aa1SThomas Huth } 1301da668aa1SThomas Huth } 1302da668aa1SThomas Huth 1303da668aa1SThomas Huth static void test_delete_by_drain(void) 1304da668aa1SThomas Huth { 1305da668aa1SThomas Huth do_test_delete_by_drain(false, BDRV_DRAIN); 1306da668aa1SThomas Huth } 1307da668aa1SThomas Huth 1308da668aa1SThomas Huth static void test_detach_by_drain_all(void) 1309da668aa1SThomas Huth { 1310da668aa1SThomas Huth do_test_delete_by_drain(true, BDRV_DRAIN_ALL); 1311da668aa1SThomas Huth } 1312da668aa1SThomas Huth 1313da668aa1SThomas Huth static void test_detach_by_drain(void) 1314da668aa1SThomas Huth { 1315da668aa1SThomas Huth do_test_delete_by_drain(true, BDRV_DRAIN); 1316da668aa1SThomas Huth } 1317da668aa1SThomas Huth 1318da668aa1SThomas Huth static void test_detach_by_drain_subtree(void) 1319da668aa1SThomas Huth { 1320da668aa1SThomas Huth do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN); 1321da668aa1SThomas Huth } 1322da668aa1SThomas Huth 1323da668aa1SThomas Huth 1324da668aa1SThomas Huth struct detach_by_parent_data { 1325da668aa1SThomas Huth BlockDriverState *parent_b; 1326da668aa1SThomas Huth BdrvChild *child_b; 1327da668aa1SThomas Huth BlockDriverState *c; 1328da668aa1SThomas Huth BdrvChild *child_c; 1329da668aa1SThomas Huth bool by_parent_cb; 1330da668aa1SThomas Huth }; 1331da668aa1SThomas Huth static struct detach_by_parent_data detach_by_parent_data; 1332da668aa1SThomas Huth 1333da668aa1SThomas Huth static void detach_indirect_bh(void *opaque) 1334da668aa1SThomas Huth { 1335da668aa1SThomas Huth struct detach_by_parent_data *data = opaque; 1336da668aa1SThomas Huth 1337da668aa1SThomas Huth bdrv_unref_child(data->parent_b, data->child_b); 1338da668aa1SThomas Huth 1339da668aa1SThomas Huth bdrv_ref(data->c); 1340da668aa1SThomas Huth data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C", 1341da668aa1SThomas Huth &child_of_bds, BDRV_CHILD_DATA, 1342da668aa1SThomas Huth &error_abort); 1343da668aa1SThomas Huth } 1344da668aa1SThomas Huth 1345da668aa1SThomas Huth static void detach_by_parent_aio_cb(void *opaque, int ret) 
1346da668aa1SThomas Huth { 1347da668aa1SThomas Huth struct detach_by_parent_data *data = &detach_by_parent_data; 1348da668aa1SThomas Huth 1349da668aa1SThomas Huth g_assert_cmpint(ret, ==, 0); 1350da668aa1SThomas Huth if (data->by_parent_cb) { 1351da668aa1SThomas Huth detach_indirect_bh(data); 1352da668aa1SThomas Huth } 1353da668aa1SThomas Huth } 1354da668aa1SThomas Huth 1355da668aa1SThomas Huth static void detach_by_driver_cb_drained_begin(BdrvChild *child) 1356da668aa1SThomas Huth { 1357da668aa1SThomas Huth aio_bh_schedule_oneshot(qemu_get_current_aio_context(), 1358da668aa1SThomas Huth detach_indirect_bh, &detach_by_parent_data); 1359da668aa1SThomas Huth child_of_bds.drained_begin(child); 1360da668aa1SThomas Huth } 1361da668aa1SThomas Huth 1362da668aa1SThomas Huth static BdrvChildClass detach_by_driver_cb_class; 1363da668aa1SThomas Huth 1364da668aa1SThomas Huth /* 1365da668aa1SThomas Huth * Initial graph: 1366da668aa1SThomas Huth * 1367da668aa1SThomas Huth * PA PB 1368da668aa1SThomas Huth * \ / \ 1369da668aa1SThomas Huth * A B C 1370da668aa1SThomas Huth * 1371da668aa1SThomas Huth * by_parent_cb == true: Test that parent callbacks don't poll 1372da668aa1SThomas Huth * 1373da668aa1SThomas Huth * PA has a pending write request whose callback changes the child nodes of 1374da668aa1SThomas Huth * PB: It removes B and adds C instead. The subtree of PB is drained, which 1375da668aa1SThomas Huth * will indirectly drain the write request, too. 1376da668aa1SThomas Huth * 1377da668aa1SThomas Huth * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll 1378da668aa1SThomas Huth * 1379da668aa1SThomas Huth * PA's BdrvChildClass has a .drained_begin callback that schedules a BH 1380da668aa1SThomas Huth * that does the same graph change. If bdrv_drain_invoke() calls it, the 1381da668aa1SThomas Huth * state is messed up, but if it is only polled in the single 1382da668aa1SThomas Huth * BDRV_POLL_WHILE() at the end of the drain, this should work fine. 
1383da668aa1SThomas Huth */ 1384da668aa1SThomas Huth static void test_detach_indirect(bool by_parent_cb) 1385da668aa1SThomas Huth { 1386da668aa1SThomas Huth BlockBackend *blk; 1387da668aa1SThomas Huth BlockDriverState *parent_a, *parent_b, *a, *b, *c; 1388da668aa1SThomas Huth BdrvChild *child_a, *child_b; 1389da668aa1SThomas Huth BlockAIOCB *acb; 1390da668aa1SThomas Huth 1391da668aa1SThomas Huth QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0); 1392da668aa1SThomas Huth 1393da668aa1SThomas Huth if (!by_parent_cb) { 1394da668aa1SThomas Huth detach_by_driver_cb_class = child_of_bds; 1395da668aa1SThomas Huth detach_by_driver_cb_class.drained_begin = 1396da668aa1SThomas Huth detach_by_driver_cb_drained_begin; 1397da668aa1SThomas Huth } 1398da668aa1SThomas Huth 1399da668aa1SThomas Huth /* Create all involved nodes */ 1400da668aa1SThomas Huth parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR, 1401da668aa1SThomas Huth &error_abort); 1402da668aa1SThomas Huth parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0, 1403da668aa1SThomas Huth &error_abort); 1404da668aa1SThomas Huth 1405da668aa1SThomas Huth a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort); 1406da668aa1SThomas Huth b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort); 1407da668aa1SThomas Huth c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort); 1408da668aa1SThomas Huth 1409da668aa1SThomas Huth /* blk is a BB for parent-a */ 1410da668aa1SThomas Huth blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); 1411da668aa1SThomas Huth blk_insert_bs(blk, parent_a, &error_abort); 1412da668aa1SThomas Huth bdrv_unref(parent_a); 1413da668aa1SThomas Huth 1414da668aa1SThomas Huth /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver 1415da668aa1SThomas Huth * callback must not return immediately. */ 1416da668aa1SThomas Huth if (!by_parent_cb) { 1417da668aa1SThomas Huth BDRVTestState *s = parent_a->opaque; 1418da668aa1SThomas Huth s->sleep_in_drain_begin = true; 1419da668aa1SThomas Huth } 1420da668aa1SThomas Huth 1421da668aa1SThomas Huth /* Set child relationships */ 1422da668aa1SThomas Huth bdrv_ref(b); 1423da668aa1SThomas Huth bdrv_ref(a); 1424da668aa1SThomas Huth child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds, 1425da668aa1SThomas Huth BDRV_CHILD_DATA, &error_abort); 1426da668aa1SThomas Huth child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds, 1427da668aa1SThomas Huth BDRV_CHILD_COW, &error_abort); 1428da668aa1SThomas Huth 1429da668aa1SThomas Huth bdrv_ref(a); 1430da668aa1SThomas Huth bdrv_attach_child(parent_a, a, "PA-A", 1431da668aa1SThomas Huth by_parent_cb ? 
&child_of_bds : &detach_by_driver_cb_class, 1432da668aa1SThomas Huth BDRV_CHILD_DATA, &error_abort); 1433da668aa1SThomas Huth 1434da668aa1SThomas Huth g_assert_cmpint(parent_a->refcnt, ==, 1); 1435da668aa1SThomas Huth g_assert_cmpint(parent_b->refcnt, ==, 1); 1436da668aa1SThomas Huth g_assert_cmpint(a->refcnt, ==, 3); 1437da668aa1SThomas Huth g_assert_cmpint(b->refcnt, ==, 2); 1438da668aa1SThomas Huth g_assert_cmpint(c->refcnt, ==, 1); 1439da668aa1SThomas Huth 1440da668aa1SThomas Huth g_assert(QLIST_FIRST(&parent_b->children) == child_a); 1441da668aa1SThomas Huth g_assert(QLIST_NEXT(child_a, next) == child_b); 1442da668aa1SThomas Huth g_assert(QLIST_NEXT(child_b, next) == NULL); 1443da668aa1SThomas Huth 1444da668aa1SThomas Huth /* Start the evil write request */ 1445da668aa1SThomas Huth detach_by_parent_data = (struct detach_by_parent_data) { 1446da668aa1SThomas Huth .parent_b = parent_b, 1447da668aa1SThomas Huth .child_b = child_b, 1448da668aa1SThomas Huth .c = c, 1449da668aa1SThomas Huth .by_parent_cb = by_parent_cb, 1450da668aa1SThomas Huth }; 1451da668aa1SThomas Huth acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL); 1452da668aa1SThomas Huth g_assert(acb != NULL); 1453da668aa1SThomas Huth 1454da668aa1SThomas Huth /* Drain and check the expected result */ 1455da668aa1SThomas Huth bdrv_subtree_drained_begin(parent_b); 1456da668aa1SThomas Huth 1457da668aa1SThomas Huth g_assert(detach_by_parent_data.child_c != NULL); 1458da668aa1SThomas Huth 1459da668aa1SThomas Huth g_assert_cmpint(parent_a->refcnt, ==, 1); 1460da668aa1SThomas Huth g_assert_cmpint(parent_b->refcnt, ==, 1); 1461da668aa1SThomas Huth g_assert_cmpint(a->refcnt, ==, 3); 1462da668aa1SThomas Huth g_assert_cmpint(b->refcnt, ==, 1); 1463da668aa1SThomas Huth g_assert_cmpint(c->refcnt, ==, 2); 1464da668aa1SThomas Huth 1465da668aa1SThomas Huth g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c); 1466da668aa1SThomas Huth g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a); 1467da668aa1SThomas Huth g_assert(QLIST_NEXT(child_a, next) == NULL); 1468da668aa1SThomas Huth 1469da668aa1SThomas Huth g_assert_cmpint(parent_a->quiesce_counter, ==, 1); 1470da668aa1SThomas Huth g_assert_cmpint(parent_b->quiesce_counter, ==, 1); 1471da668aa1SThomas Huth g_assert_cmpint(a->quiesce_counter, ==, 1); 1472da668aa1SThomas Huth g_assert_cmpint(b->quiesce_counter, ==, 0); 1473da668aa1SThomas Huth g_assert_cmpint(c->quiesce_counter, ==, 1); 1474da668aa1SThomas Huth 1475da668aa1SThomas Huth bdrv_subtree_drained_end(parent_b); 1476da668aa1SThomas Huth 1477da668aa1SThomas Huth bdrv_unref(parent_b); 1478da668aa1SThomas Huth blk_unref(blk); 1479da668aa1SThomas Huth 1480da668aa1SThomas Huth g_assert_cmpint(a->refcnt, ==, 1); 1481da668aa1SThomas Huth g_assert_cmpint(b->refcnt, ==, 1); 1482da668aa1SThomas Huth g_assert_cmpint(c->refcnt, ==, 1); 1483da668aa1SThomas Huth bdrv_unref(a); 1484da668aa1SThomas Huth bdrv_unref(b); 1485da668aa1SThomas Huth bdrv_unref(c); 1486da668aa1SThomas Huth } 1487da668aa1SThomas Huth 1488da668aa1SThomas Huth static void test_detach_by_parent_cb(void) 1489da668aa1SThomas Huth { 1490da668aa1SThomas Huth test_detach_indirect(true); 1491da668aa1SThomas Huth } 1492da668aa1SThomas Huth 1493da668aa1SThomas Huth static void test_detach_by_driver_cb(void) 1494da668aa1SThomas Huth { 1495da668aa1SThomas Huth test_detach_indirect(false); 1496da668aa1SThomas Huth } 1497da668aa1SThomas Huth 1498da668aa1SThomas Huth static void test_append_to_drained(void) 1499da668aa1SThomas Huth { 
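    /*
     * Check that bdrv_append() on a drained node propagates the drained
     * state to the newly inserted overlay: while the drained section is
     * active, both base and overlay must show quiesce_counter == 1 and
     * drain_count == 1 with no requests in flight, and both counters
     * must drop back to 0 once the drain ends.
     */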
1500da668aa1SThomas Huth BlockBackend *blk; 1501da668aa1SThomas Huth BlockDriverState *base, *overlay; 1502da668aa1SThomas Huth BDRVTestState *base_s, *overlay_s; 1503da668aa1SThomas Huth 1504da668aa1SThomas Huth blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); 1505da668aa1SThomas Huth base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); 1506da668aa1SThomas Huth base_s = base->opaque; 1507da668aa1SThomas Huth blk_insert_bs(blk, base, &error_abort); 1508da668aa1SThomas Huth 1509da668aa1SThomas Huth overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR, 1510da668aa1SThomas Huth &error_abort); 1511da668aa1SThomas Huth overlay_s = overlay->opaque; 1512da668aa1SThomas Huth 1513da668aa1SThomas Huth do_drain_begin(BDRV_DRAIN, base); 1514da668aa1SThomas Huth g_assert_cmpint(base->quiesce_counter, ==, 1); 1515da668aa1SThomas Huth g_assert_cmpint(base_s->drain_count, ==, 1); 1516da668aa1SThomas Huth g_assert_cmpint(base->in_flight, ==, 0); 1517da668aa1SThomas Huth 1518da668aa1SThomas Huth bdrv_append(overlay, base, &error_abort); 1519da668aa1SThomas Huth g_assert_cmpint(base->in_flight, ==, 0); 1520da668aa1SThomas Huth g_assert_cmpint(overlay->in_flight, ==, 0); 1521da668aa1SThomas Huth 1522da668aa1SThomas Huth g_assert_cmpint(base->quiesce_counter, ==, 1); 1523da668aa1SThomas Huth g_assert_cmpint(base_s->drain_count, ==, 1); 1524da668aa1SThomas Huth g_assert_cmpint(overlay->quiesce_counter, ==, 1); 1525da668aa1SThomas Huth g_assert_cmpint(overlay_s->drain_count, ==, 1); 1526da668aa1SThomas Huth 1527da668aa1SThomas Huth do_drain_end(BDRV_DRAIN, base); 1528da668aa1SThomas Huth 1529da668aa1SThomas Huth g_assert_cmpint(base->quiesce_counter, ==, 0); 1530da668aa1SThomas Huth g_assert_cmpint(base_s->drain_count, ==, 0); 1531da668aa1SThomas Huth g_assert_cmpint(overlay->quiesce_counter, ==, 0); 1532da668aa1SThomas Huth g_assert_cmpint(overlay_s->drain_count, ==, 0); 1533da668aa1SThomas Huth 1534ae9d4417SVladimir Sementsov-Ogievskiy bdrv_unref(overlay); 1535da668aa1SThomas Huth bdrv_unref(base); 1536da668aa1SThomas Huth blk_unref(blk); 1537da668aa1SThomas Huth } 1538da668aa1SThomas Huth 1539da668aa1SThomas Huth static void test_set_aio_context(void) 1540da668aa1SThomas Huth { 1541da668aa1SThomas Huth BlockDriverState *bs; 1542da668aa1SThomas Huth IOThread *a = iothread_new(); 1543da668aa1SThomas Huth IOThread *b = iothread_new(); 1544da668aa1SThomas Huth AioContext *ctx_a = iothread_get_aio_context(a); 1545da668aa1SThomas Huth AioContext *ctx_b = iothread_get_aio_context(b); 1546da668aa1SThomas Huth 1547da668aa1SThomas Huth bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, 1548da668aa1SThomas Huth &error_abort); 1549da668aa1SThomas Huth 1550da668aa1SThomas Huth bdrv_drained_begin(bs); 1551142e6907SEmanuele Giuseppe Esposito bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); 1552da668aa1SThomas Huth 1553da668aa1SThomas Huth aio_context_acquire(ctx_a); 1554da668aa1SThomas Huth bdrv_drained_end(bs); 1555da668aa1SThomas Huth 1556da668aa1SThomas Huth bdrv_drained_begin(bs); 1557142e6907SEmanuele Giuseppe Esposito bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); 1558da668aa1SThomas Huth aio_context_release(ctx_a); 1559da668aa1SThomas Huth aio_context_acquire(ctx_b); 1560142e6907SEmanuele Giuseppe Esposito bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); 1561da668aa1SThomas Huth aio_context_release(ctx_b); 1562da668aa1SThomas Huth bdrv_drained_end(bs); 1563da668aa1SThomas Huth 1564da668aa1SThomas 
Huth bdrv_unref(bs); 1565da668aa1SThomas Huth iothread_join(a); 1566da668aa1SThomas Huth iothread_join(b); 1567da668aa1SThomas Huth } 1568da668aa1SThomas Huth 1569da668aa1SThomas Huth 1570da668aa1SThomas Huth typedef struct TestDropBackingBlockJob { 1571da668aa1SThomas Huth BlockJob common; 1572da668aa1SThomas Huth bool should_complete; 1573da668aa1SThomas Huth bool *did_complete; 1574da668aa1SThomas Huth BlockDriverState *detach_also; 15751b177bbeSVladimir Sementsov-Ogievskiy BlockDriverState *bs; 1576da668aa1SThomas Huth } TestDropBackingBlockJob; 1577da668aa1SThomas Huth 1578da668aa1SThomas Huth static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp) 1579da668aa1SThomas Huth { 1580da668aa1SThomas Huth TestDropBackingBlockJob *s = 1581da668aa1SThomas Huth container_of(job, TestDropBackingBlockJob, common.job); 1582da668aa1SThomas Huth 1583da668aa1SThomas Huth while (!s->should_complete) { 1584da668aa1SThomas Huth job_sleep_ns(job, 0); 1585da668aa1SThomas Huth } 1586da668aa1SThomas Huth 1587da668aa1SThomas Huth return 0; 1588da668aa1SThomas Huth } 1589da668aa1SThomas Huth 1590da668aa1SThomas Huth static void test_drop_backing_job_commit(Job *job) 1591da668aa1SThomas Huth { 1592da668aa1SThomas Huth TestDropBackingBlockJob *s = 1593da668aa1SThomas Huth container_of(job, TestDropBackingBlockJob, common.job); 1594da668aa1SThomas Huth 15951b177bbeSVladimir Sementsov-Ogievskiy bdrv_set_backing_hd(s->bs, NULL, &error_abort); 1596da668aa1SThomas Huth bdrv_set_backing_hd(s->detach_also, NULL, &error_abort); 1597da668aa1SThomas Huth 1598da668aa1SThomas Huth *s->did_complete = true; 1599da668aa1SThomas Huth } 1600da668aa1SThomas Huth 1601da668aa1SThomas Huth static const BlockJobDriver test_drop_backing_job_driver = { 1602da668aa1SThomas Huth .job_driver = { 1603da668aa1SThomas Huth .instance_size = sizeof(TestDropBackingBlockJob), 1604da668aa1SThomas Huth .free = block_job_free, 1605da668aa1SThomas Huth .user_resume = block_job_user_resume, 1606da668aa1SThomas Huth .run = test_drop_backing_job_run, 1607da668aa1SThomas Huth .commit = test_drop_backing_job_commit, 1608da668aa1SThomas Huth } 1609da668aa1SThomas Huth }; 1610da668aa1SThomas Huth 1611da668aa1SThomas Huth /** 1612da668aa1SThomas Huth * Creates a child node with three parent nodes on it, and then runs a 1613da668aa1SThomas Huth * block job on the final one, parent-node-2. 1614da668aa1SThomas Huth * 1615da668aa1SThomas Huth * The job is then asked to complete before a section where the child 1616da668aa1SThomas Huth * is drained. 1617da668aa1SThomas Huth * 1618da668aa1SThomas Huth * Ending this section will undrain the child's parents, first 1619da668aa1SThomas Huth * parent-node-2, then parent-node-1, then parent-node-0 -- the parent 1620da668aa1SThomas Huth * list is in reverse order of how they were added. Ending the drain 1621da668aa1SThomas Huth * on parent-node-2 will resume the job, thus completing it and 1622da668aa1SThomas Huth * scheduling job_exit(). 1623da668aa1SThomas Huth * 1624da668aa1SThomas Huth * Ending the drain on parent-node-1 will poll the AioContext, which 1625da668aa1SThomas Huth * lets job_exit() and thus test_drop_backing_job_commit() run. That 1626da668aa1SThomas Huth * function first removes the child as parent-node-2's backing file. 
1627da668aa1SThomas Huth * 1628da668aa1SThomas Huth * In old (and buggy) implementations, there are two problems with 1629da668aa1SThomas Huth * that: 1630da668aa1SThomas Huth * (A) bdrv_drain_invoke() polls for every node that leaves the 1631da668aa1SThomas Huth * drained section. This means that job_exit() is scheduled 1632da668aa1SThomas Huth * before the child has left the drained section. Its 1633da668aa1SThomas Huth * quiesce_counter is therefore still 1 when it is removed from 1634da668aa1SThomas Huth * parent-node-2. 1635da668aa1SThomas Huth * 1636da668aa1SThomas Huth * (B) bdrv_replace_child_noperm() calls drained_end() on the old 1637da668aa1SThomas Huth * child's parents as many times as the child is quiesced. This 1638da668aa1SThomas Huth * means it will call drained_end() on parent-node-2 once. 1639da668aa1SThomas Huth * Because parent-node-2 is no longer quiesced at this point, this 1640da668aa1SThomas Huth * will fail. 1641da668aa1SThomas Huth * 1642da668aa1SThomas Huth * bdrv_replace_child_noperm() therefore must call drained_end() on 1643da668aa1SThomas Huth * the parent only if it really is still drained because the child is 1644da668aa1SThomas Huth * drained. 1645da668aa1SThomas Huth * 1646da668aa1SThomas Huth * If removing child from parent-node-2 was successful (as it should 1647da668aa1SThomas Huth * be), test_drop_backing_job_commit() will then also remove the child 1648da668aa1SThomas Huth * from parent-node-0. 1649da668aa1SThomas Huth * 1650da668aa1SThomas Huth * With an old version of our drain infrastructure ((A) above), that 1651da668aa1SThomas Huth * resulted in the following flow: 1652da668aa1SThomas Huth * 1653da668aa1SThomas Huth * 1. child attempts to leave its drained section. The call recurses 1654da668aa1SThomas Huth * to its parents. 1655da668aa1SThomas Huth * 1656da668aa1SThomas Huth * 2. parent-node-2 leaves the drained section. Polling in 1657da668aa1SThomas Huth * bdrv_drain_invoke() will schedule job_exit(). 1658da668aa1SThomas Huth * 1659da668aa1SThomas Huth * 3. parent-node-1 leaves the drained section. Polling in 1660da668aa1SThomas Huth * bdrv_drain_invoke() will run job_exit(), thus disconnecting 1661da668aa1SThomas Huth * parent-node-0 from the child node. 1662da668aa1SThomas Huth * 1663da668aa1SThomas Huth * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to 1664da668aa1SThomas Huth * iterate over the parents. Thus, it now accesses the BdrvChild 1665da668aa1SThomas Huth * object that used to connect parent-node-0 and the child node. 1666da668aa1SThomas Huth * However, that object no longer exists, so it accesses a dangling 1667da668aa1SThomas Huth * pointer. 1668da668aa1SThomas Huth * 1669da668aa1SThomas Huth * The solution is to only poll once when running a bdrv_drained_end() 1670da668aa1SThomas Huth * operation, specifically at the end when all drained_end() 1671da668aa1SThomas Huth * operations for all involved nodes have been scheduled. 1672da668aa1SThomas Huth * Note that this also solves (A) above, thus hiding (B). 
1673da668aa1SThomas Huth */ 1674da668aa1SThomas Huth static void test_blockjob_commit_by_drained_end(void) 1675da668aa1SThomas Huth { 1676da668aa1SThomas Huth BlockDriverState *bs_child, *bs_parents[3]; 1677da668aa1SThomas Huth TestDropBackingBlockJob *job; 1678da668aa1SThomas Huth bool job_has_completed = false; 1679da668aa1SThomas Huth int i; 1680da668aa1SThomas Huth 1681da668aa1SThomas Huth bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR, 1682da668aa1SThomas Huth &error_abort); 1683da668aa1SThomas Huth 1684da668aa1SThomas Huth for (i = 0; i < 3; i++) { 1685da668aa1SThomas Huth char name[32]; 1686da668aa1SThomas Huth snprintf(name, sizeof(name), "parent-node-%i", i); 1687da668aa1SThomas Huth bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR, 1688da668aa1SThomas Huth &error_abort); 1689da668aa1SThomas Huth bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort); 1690da668aa1SThomas Huth } 1691da668aa1SThomas Huth 1692da668aa1SThomas Huth job = block_job_create("job", &test_drop_backing_job_driver, NULL, 1693da668aa1SThomas Huth bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL, 1694da668aa1SThomas Huth &error_abort); 16951b177bbeSVladimir Sementsov-Ogievskiy job->bs = bs_parents[2]; 1696da668aa1SThomas Huth 1697da668aa1SThomas Huth job->detach_also = bs_parents[0]; 1698da668aa1SThomas Huth job->did_complete = &job_has_completed; 1699da668aa1SThomas Huth 1700da668aa1SThomas Huth job_start(&job->common.job); 1701da668aa1SThomas Huth 1702da668aa1SThomas Huth job->should_complete = true; 1703da668aa1SThomas Huth bdrv_drained_begin(bs_child); 1704da668aa1SThomas Huth g_assert(!job_has_completed); 1705da668aa1SThomas Huth bdrv_drained_end(bs_child); 1706*5e8ac217SKevin Wolf aio_poll(qemu_get_aio_context(), false); 1707da668aa1SThomas Huth g_assert(job_has_completed); 1708da668aa1SThomas Huth 1709da668aa1SThomas Huth bdrv_unref(bs_parents[0]); 1710da668aa1SThomas Huth bdrv_unref(bs_parents[1]); 1711da668aa1SThomas Huth bdrv_unref(bs_parents[2]); 1712da668aa1SThomas Huth bdrv_unref(bs_child); 1713da668aa1SThomas Huth } 1714da668aa1SThomas Huth 1715da668aa1SThomas Huth 1716da668aa1SThomas Huth typedef struct TestSimpleBlockJob { 1717da668aa1SThomas Huth BlockJob common; 1718da668aa1SThomas Huth bool should_complete; 1719da668aa1SThomas Huth bool *did_complete; 1720da668aa1SThomas Huth } TestSimpleBlockJob; 1721da668aa1SThomas Huth 1722da668aa1SThomas Huth static int coroutine_fn test_simple_job_run(Job *job, Error **errp) 1723da668aa1SThomas Huth { 1724da668aa1SThomas Huth TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); 1725da668aa1SThomas Huth 1726da668aa1SThomas Huth while (!s->should_complete) { 1727da668aa1SThomas Huth job_sleep_ns(job, 0); 1728da668aa1SThomas Huth } 1729da668aa1SThomas Huth 1730da668aa1SThomas Huth return 0; 1731da668aa1SThomas Huth } 1732da668aa1SThomas Huth 1733da668aa1SThomas Huth static void test_simple_job_clean(Job *job) 1734da668aa1SThomas Huth { 1735da668aa1SThomas Huth TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); 1736da668aa1SThomas Huth *s->did_complete = true; 1737da668aa1SThomas Huth } 1738da668aa1SThomas Huth 1739da668aa1SThomas Huth static const BlockJobDriver test_simple_job_driver = { 1740da668aa1SThomas Huth .job_driver = { 1741da668aa1SThomas Huth .instance_size = sizeof(TestSimpleBlockJob), 1742da668aa1SThomas Huth .free = block_job_free, 1743da668aa1SThomas Huth .user_resume = block_job_user_resume, 1744da668aa1SThomas Huth .run = test_simple_job_run, 
1745da668aa1SThomas Huth .clean = test_simple_job_clean, 1746da668aa1SThomas Huth }, 1747da668aa1SThomas Huth }; 1748da668aa1SThomas Huth 1749da668aa1SThomas Huth static int drop_intermediate_poll_update_filename(BdrvChild *child, 1750da668aa1SThomas Huth BlockDriverState *new_base, 1751da668aa1SThomas Huth const char *filename, 1752da668aa1SThomas Huth Error **errp) 1753da668aa1SThomas Huth { 1754da668aa1SThomas Huth /* 1755da668aa1SThomas Huth * We are free to poll here, which may change the block graph, if 1756da668aa1SThomas Huth * it is not drained. 1757da668aa1SThomas Huth */ 1758da668aa1SThomas Huth 1759da668aa1SThomas Huth /* If the job is not drained: Complete it, schedule job_exit() */ 1760da668aa1SThomas Huth aio_poll(qemu_get_current_aio_context(), false); 1761da668aa1SThomas Huth /* If the job is not drained: Run job_exit(), finish the job */ 1762da668aa1SThomas Huth aio_poll(qemu_get_current_aio_context(), false); 1763da668aa1SThomas Huth 1764da668aa1SThomas Huth return 0; 1765da668aa1SThomas Huth } 1766da668aa1SThomas Huth 1767da668aa1SThomas Huth /** 1768da668aa1SThomas Huth * Test a poll in the midst of bdrv_drop_intermediate(). 1769da668aa1SThomas Huth * 1770da668aa1SThomas Huth * bdrv_drop_intermediate() calls BdrvChildClass.update_filename(), 1771da668aa1SThomas Huth * which can yield or poll. This may lead to graph changes, unless 1772da668aa1SThomas Huth * the whole subtree in question is drained. 1773da668aa1SThomas Huth * 1774da668aa1SThomas Huth * We test this on the following graph: 1775da668aa1SThomas Huth * 1776da668aa1SThomas Huth * Job 1777da668aa1SThomas Huth * 1778da668aa1SThomas Huth * | 1779da668aa1SThomas Huth * job-node 1780da668aa1SThomas Huth * | 1781da668aa1SThomas Huth * v 1782da668aa1SThomas Huth * 1783da668aa1SThomas Huth * job-node 1784da668aa1SThomas Huth * 1785da668aa1SThomas Huth * | 1786da668aa1SThomas Huth * backing 1787da668aa1SThomas Huth * | 1788da668aa1SThomas Huth * v 1789da668aa1SThomas Huth * 1790da668aa1SThomas Huth * node-2 --chain--> node-1 --chain--> node-0 1791da668aa1SThomas Huth * 1792da668aa1SThomas Huth * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0). 1793da668aa1SThomas Huth * 1794da668aa1SThomas Huth * This first updates node-2's backing filename by invoking 1795da668aa1SThomas Huth * drop_intermediate_poll_update_filename(), which polls twice. This 1796da668aa1SThomas Huth * causes the job to finish, which in turns causes the job-node to be 1797da668aa1SThomas Huth * deleted. 1798da668aa1SThomas Huth * 1799da668aa1SThomas Huth * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it 1800da668aa1SThomas Huth * already has a pointer to the BdrvChild edge between job-node and 1801da668aa1SThomas Huth * node-1. When it tries to handle that edge, we probably get a 1802da668aa1SThomas Huth * segmentation fault because the object no longer exists. 1803da668aa1SThomas Huth * 1804da668aa1SThomas Huth * 1805da668aa1SThomas Huth * The solution is for bdrv_drop_intermediate() to drain top's 1806da668aa1SThomas Huth * subtree. This prevents graph changes from happening just because 1807da668aa1SThomas Huth * BdrvChildClass.update_filename() yields or polls. Thus, the block 1808da668aa1SThomas Huth * job is paused during that drained section and must finish before or 1809da668aa1SThomas Huth * after. 1810da668aa1SThomas Huth * 1811da668aa1SThomas Huth * (In addition, bdrv_replace_child() must keep the job paused.) 
1812da668aa1SThomas Huth */ 1813da668aa1SThomas Huth static void test_drop_intermediate_poll(void) 1814da668aa1SThomas Huth { 1815da668aa1SThomas Huth static BdrvChildClass chain_child_class; 1816da668aa1SThomas Huth BlockDriverState *chain[3]; 1817da668aa1SThomas Huth TestSimpleBlockJob *job; 1818da668aa1SThomas Huth BlockDriverState *job_node; 1819da668aa1SThomas Huth bool job_has_completed = false; 1820da668aa1SThomas Huth int i; 1821da668aa1SThomas Huth int ret; 1822da668aa1SThomas Huth 1823da668aa1SThomas Huth chain_child_class = child_of_bds; 1824da668aa1SThomas Huth chain_child_class.update_filename = drop_intermediate_poll_update_filename; 1825da668aa1SThomas Huth 1826da668aa1SThomas Huth for (i = 0; i < 3; i++) { 1827da668aa1SThomas Huth char name[32]; 1828da668aa1SThomas Huth snprintf(name, 32, "node-%i", i); 1829da668aa1SThomas Huth 1830da668aa1SThomas Huth chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort); 1831da668aa1SThomas Huth } 1832da668aa1SThomas Huth 1833da668aa1SThomas Huth job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR, 1834da668aa1SThomas Huth &error_abort); 1835da668aa1SThomas Huth bdrv_set_backing_hd(job_node, chain[1], &error_abort); 1836da668aa1SThomas Huth 1837da668aa1SThomas Huth /* 1838da668aa1SThomas Huth * Establish the chain last, so the chain links are the first 1839da668aa1SThomas Huth * elements in the BDS.parents lists 1840da668aa1SThomas Huth */ 1841da668aa1SThomas Huth for (i = 0; i < 3; i++) { 1842da668aa1SThomas Huth if (i) { 1843da668aa1SThomas Huth /* Takes the reference to chain[i - 1] */ 18445bb04747SVladimir Sementsov-Ogievskiy bdrv_attach_child(chain[i], chain[i - 1], "chain", 18455bb04747SVladimir Sementsov-Ogievskiy &chain_child_class, BDRV_CHILD_COW, &error_abort); 1846da668aa1SThomas Huth } 1847da668aa1SThomas Huth } 1848da668aa1SThomas Huth 1849da668aa1SThomas Huth job = block_job_create("job", &test_simple_job_driver, NULL, job_node, 1850da668aa1SThomas Huth 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); 1851da668aa1SThomas Huth 1852da668aa1SThomas Huth /* The job has a reference now */ 1853da668aa1SThomas Huth bdrv_unref(job_node); 1854da668aa1SThomas Huth 1855da668aa1SThomas Huth job->did_complete = &job_has_completed; 1856da668aa1SThomas Huth 1857da668aa1SThomas Huth job_start(&job->common.job); 1858da668aa1SThomas Huth job->should_complete = true; 1859da668aa1SThomas Huth 1860da668aa1SThomas Huth g_assert(!job_has_completed); 1861da668aa1SThomas Huth ret = bdrv_drop_intermediate(chain[1], chain[0], NULL); 1862*5e8ac217SKevin Wolf aio_poll(qemu_get_aio_context(), false); 1863da668aa1SThomas Huth g_assert(ret == 0); 1864da668aa1SThomas Huth g_assert(job_has_completed); 1865da668aa1SThomas Huth 1866da668aa1SThomas Huth bdrv_unref(chain[2]); 1867da668aa1SThomas Huth } 1868da668aa1SThomas Huth 1869da668aa1SThomas Huth 1870da668aa1SThomas Huth typedef struct BDRVReplaceTestState { 1871da668aa1SThomas Huth bool was_drained; 1872da668aa1SThomas Huth bool was_undrained; 1873da668aa1SThomas Huth bool has_read; 1874da668aa1SThomas Huth 1875da668aa1SThomas Huth int drain_count; 1876da668aa1SThomas Huth 1877da668aa1SThomas Huth bool yield_before_read; 1878da668aa1SThomas Huth Coroutine *io_co; 1879da668aa1SThomas Huth Coroutine *drain_co; 1880da668aa1SThomas Huth } BDRVReplaceTestState; 1881da668aa1SThomas Huth 1882da668aa1SThomas Huth static void bdrv_replace_test_close(BlockDriverState *bs) 1883da668aa1SThomas Huth { 1884da668aa1SThomas Huth } 1885da668aa1SThomas Huth 1886da668aa1SThomas Huth /** 
1887da668aa1SThomas Huth * If @bs has a backing file: 1888da668aa1SThomas Huth * Yield if .yield_before_read is true (and wait for drain_begin to 1889da668aa1SThomas Huth * wake us up). 1890da668aa1SThomas Huth * Forward the read to bs->backing. Set .has_read to true. 1891da668aa1SThomas Huth * If drain_begin has woken us, wake it in turn. 1892da668aa1SThomas Huth * 1893da668aa1SThomas Huth * Otherwise: 1894da668aa1SThomas Huth * Set .has_read to true and return success. 1895da668aa1SThomas Huth */ 1896da668aa1SThomas Huth static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs, 1897f7ef38ddSVladimir Sementsov-Ogievskiy int64_t offset, 1898f7ef38ddSVladimir Sementsov-Ogievskiy int64_t bytes, 1899da668aa1SThomas Huth QEMUIOVector *qiov, 1900f7ef38ddSVladimir Sementsov-Ogievskiy BdrvRequestFlags flags) 1901da668aa1SThomas Huth { 1902da668aa1SThomas Huth BDRVReplaceTestState *s = bs->opaque; 1903da668aa1SThomas Huth 1904da668aa1SThomas Huth if (bs->backing) { 1905da668aa1SThomas Huth int ret; 1906da668aa1SThomas Huth 1907da668aa1SThomas Huth g_assert(!s->drain_count); 1908da668aa1SThomas Huth 1909da668aa1SThomas Huth s->io_co = qemu_coroutine_self(); 1910da668aa1SThomas Huth if (s->yield_before_read) { 1911da668aa1SThomas Huth s->yield_before_read = false; 1912da668aa1SThomas Huth qemu_coroutine_yield(); 1913da668aa1SThomas Huth } 1914da668aa1SThomas Huth s->io_co = NULL; 1915da668aa1SThomas Huth 1916da668aa1SThomas Huth ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0); 1917da668aa1SThomas Huth s->has_read = true; 1918da668aa1SThomas Huth 1919da668aa1SThomas Huth /* Wake up drain_co if it runs */ 1920da668aa1SThomas Huth if (s->drain_co) { 1921da668aa1SThomas Huth aio_co_wake(s->drain_co); 1922da668aa1SThomas Huth } 1923da668aa1SThomas Huth 1924da668aa1SThomas Huth return ret; 1925da668aa1SThomas Huth } 1926da668aa1SThomas Huth 1927da668aa1SThomas Huth s->has_read = true; 1928da668aa1SThomas Huth return 0; 1929da668aa1SThomas Huth } 1930da668aa1SThomas Huth 19317bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_drain_co(void *opaque) 19327bce1c29SKevin Wolf { 19337bce1c29SKevin Wolf BlockDriverState *bs = opaque; 19347bce1c29SKevin Wolf BDRVReplaceTestState *s = bs->opaque; 19357bce1c29SKevin Wolf 19367bce1c29SKevin Wolf /* Keep waking io_co up until it is done */ 19377bce1c29SKevin Wolf while (s->io_co) { 19387bce1c29SKevin Wolf aio_co_wake(s->io_co); 19397bce1c29SKevin Wolf s->io_co = NULL; 19407bce1c29SKevin Wolf qemu_coroutine_yield(); 19417bce1c29SKevin Wolf } 19427bce1c29SKevin Wolf s->drain_co = NULL; 19437bce1c29SKevin Wolf bdrv_dec_in_flight(bs); 19447bce1c29SKevin Wolf } 19457bce1c29SKevin Wolf 1946da668aa1SThomas Huth /** 1947da668aa1SThomas Huth * If .drain_count is 0, wake up .io_co if there is one; and set 1948da668aa1SThomas Huth * .was_drained. 1949da668aa1SThomas Huth * Increment .drain_count. 
1950da668aa1SThomas Huth */ 1951*5e8ac217SKevin Wolf static void bdrv_replace_test_drain_begin(BlockDriverState *bs) 1952da668aa1SThomas Huth { 1953da668aa1SThomas Huth BDRVReplaceTestState *s = bs->opaque; 1954da668aa1SThomas Huth 1955da668aa1SThomas Huth if (!s->drain_count) { 19567bce1c29SKevin Wolf s->drain_co = qemu_coroutine_create(bdrv_replace_test_drain_co, bs); 19577bce1c29SKevin Wolf bdrv_inc_in_flight(bs); 19587bce1c29SKevin Wolf aio_co_enter(bdrv_get_aio_context(bs), s->drain_co); 1959da668aa1SThomas Huth s->was_drained = true; 1960da668aa1SThomas Huth } 1961da668aa1SThomas Huth s->drain_count++; 1962da668aa1SThomas Huth } 1963da668aa1SThomas Huth 19647bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_read_entry(void *opaque) 19657bce1c29SKevin Wolf { 19667bce1c29SKevin Wolf BlockDriverState *bs = opaque; 19677bce1c29SKevin Wolf char data; 19687bce1c29SKevin Wolf QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1); 19697bce1c29SKevin Wolf int ret; 19707bce1c29SKevin Wolf 19717bce1c29SKevin Wolf /* Queue a read request post-drain */ 19727bce1c29SKevin Wolf ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0); 19737bce1c29SKevin Wolf g_assert(ret >= 0); 19747bce1c29SKevin Wolf bdrv_dec_in_flight(bs); 19757bce1c29SKevin Wolf } 19767bce1c29SKevin Wolf 1977da668aa1SThomas Huth /** 1978da668aa1SThomas Huth * Reduce .drain_count, set .was_undrained once it reaches 0. 1979da668aa1SThomas Huth * If .drain_count reaches 0 and the node has a backing file, issue a 1980da668aa1SThomas Huth * read request. 1981da668aa1SThomas Huth */ 1982*5e8ac217SKevin Wolf static void bdrv_replace_test_drain_end(BlockDriverState *bs) 1983da668aa1SThomas Huth { 1984da668aa1SThomas Huth BDRVReplaceTestState *s = bs->opaque; 1985da668aa1SThomas Huth 1986da668aa1SThomas Huth g_assert(s->drain_count > 0); 1987da668aa1SThomas Huth if (!--s->drain_count) { 1988da668aa1SThomas Huth s->was_undrained = true; 1989da668aa1SThomas Huth 1990da668aa1SThomas Huth if (bs->backing) { 19917bce1c29SKevin Wolf Coroutine *co = qemu_coroutine_create(bdrv_replace_test_read_entry, 19927bce1c29SKevin Wolf bs); 19937bce1c29SKevin Wolf bdrv_inc_in_flight(bs); 19947bce1c29SKevin Wolf aio_co_enter(bdrv_get_aio_context(bs), co); 1995da668aa1SThomas Huth } 1996da668aa1SThomas Huth } 1997da668aa1SThomas Huth } 1998da668aa1SThomas Huth 1999da668aa1SThomas Huth static BlockDriver bdrv_replace_test = { 2000da668aa1SThomas Huth .format_name = "replace_test", 2001da668aa1SThomas Huth .instance_size = sizeof(BDRVReplaceTestState), 20029ebfc111SVladimir Sementsov-Ogievskiy .supports_backing = true, 2003da668aa1SThomas Huth 2004da668aa1SThomas Huth .bdrv_close = bdrv_replace_test_close, 2005da668aa1SThomas Huth .bdrv_co_preadv = bdrv_replace_test_co_preadv, 2006da668aa1SThomas Huth 2007*5e8ac217SKevin Wolf .bdrv_drain_begin = bdrv_replace_test_drain_begin, 2008*5e8ac217SKevin Wolf .bdrv_drain_end = bdrv_replace_test_drain_end, 2009da668aa1SThomas Huth 2010da668aa1SThomas Huth .bdrv_child_perm = bdrv_default_perms, 2011da668aa1SThomas Huth }; 2012da668aa1SThomas Huth 2013da668aa1SThomas Huth static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque) 2014da668aa1SThomas Huth { 2015da668aa1SThomas Huth int ret; 2016da668aa1SThomas Huth char data; 2017da668aa1SThomas Huth 2018da668aa1SThomas Huth ret = blk_co_pread(opaque, 0, 1, &data, 0); 2019da668aa1SThomas Huth g_assert(ret >= 0); 2020da668aa1SThomas Huth } 2021da668aa1SThomas Huth 2022da668aa1SThomas Huth /** 2023da668aa1SThomas Huth * We test two things: 
2024da668aa1SThomas Huth * (1) bdrv_replace_child_noperm() must not undrain the parent if both 2025da668aa1SThomas Huth * children are drained. 2026da668aa1SThomas Huth * (2) bdrv_replace_child_noperm() must never flush I/O requests to a 2027da668aa1SThomas Huth * drained child. If the old child is drained, it must flush I/O 2028da668aa1SThomas Huth * requests after the new one has been attached. If the new child 2029da668aa1SThomas Huth * is drained, it must flush I/O requests before the old one is 2030da668aa1SThomas Huth * detached. 2031da668aa1SThomas Huth * 2032da668aa1SThomas Huth * To do so, we create one parent node and two child nodes; then 2033da668aa1SThomas Huth * attach one of the children (old_child_bs) to the parent, then 2034da668aa1SThomas Huth * drain both old_child_bs and new_child_bs according to 2035da668aa1SThomas Huth * old_drain_count and new_drain_count, respectively, and finally 2036da668aa1SThomas Huth * we invoke bdrv_replace_node() to replace old_child_bs by 2037da668aa1SThomas Huth * new_child_bs. 2038da668aa1SThomas Huth * 2039da668aa1SThomas Huth * The test block driver we use here (bdrv_replace_test) has a read 2040da668aa1SThomas Huth * function that: 2041da668aa1SThomas Huth * - For the parent node, can optionally yield, and then forwards the 2042da668aa1SThomas Huth * read to bdrv_preadv(), 2043da668aa1SThomas Huth * - For the child node, just returns immediately. 2044da668aa1SThomas Huth * 2045da668aa1SThomas Huth * If the read yields, the drain_begin function will wake it up. 2046da668aa1SThomas Huth * 2047da668aa1SThomas Huth * The drain_end function issues a read on the parent once it is fully 2048da668aa1SThomas Huth * undrained (which simulates requests starting to come in again). 2049da668aa1SThomas Huth */ 2050da668aa1SThomas Huth static void do_test_replace_child_mid_drain(int old_drain_count, 2051da668aa1SThomas Huth int new_drain_count) 2052da668aa1SThomas Huth { 2053da668aa1SThomas Huth BlockBackend *parent_blk; 2054da668aa1SThomas Huth BlockDriverState *parent_bs; 2055da668aa1SThomas Huth BlockDriverState *old_child_bs, *new_child_bs; 2056da668aa1SThomas Huth BDRVReplaceTestState *parent_s; 2057da668aa1SThomas Huth BDRVReplaceTestState *old_child_s, *new_child_s; 2058da668aa1SThomas Huth Coroutine *io_co; 2059da668aa1SThomas Huth int i; 2060da668aa1SThomas Huth 2061da668aa1SThomas Huth parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0, 2062da668aa1SThomas Huth &error_abort); 2063da668aa1SThomas Huth parent_s = parent_bs->opaque; 2064da668aa1SThomas Huth 2065da668aa1SThomas Huth parent_blk = blk_new(qemu_get_aio_context(), 2066da668aa1SThomas Huth BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL); 2067da668aa1SThomas Huth blk_insert_bs(parent_blk, parent_bs, &error_abort); 2068da668aa1SThomas Huth 2069da668aa1SThomas Huth old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0, 2070da668aa1SThomas Huth &error_abort); 2071da668aa1SThomas Huth new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0, 2072da668aa1SThomas Huth &error_abort); 2073da668aa1SThomas Huth old_child_s = old_child_bs->opaque; 2074da668aa1SThomas Huth new_child_s = new_child_bs->opaque; 2075da668aa1SThomas Huth 2076da668aa1SThomas Huth /* So that we can read something */ 2077da668aa1SThomas Huth parent_bs->total_sectors = 1; 2078da668aa1SThomas Huth old_child_bs->total_sectors = 1; 2079da668aa1SThomas Huth new_child_bs->total_sectors = 1; 2080da668aa1SThomas Huth 2081da668aa1SThomas Huth bdrv_ref(old_child_bs); 20825bb04747SVladimir 
Sementsov-Ogievskiy bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, 20835bb04747SVladimir Sementsov-Ogievskiy BDRV_CHILD_COW, &error_abort); 2084da668aa1SThomas Huth 2085da668aa1SThomas Huth for (i = 0; i < old_drain_count; i++) { 2086da668aa1SThomas Huth bdrv_drained_begin(old_child_bs); 2087da668aa1SThomas Huth } 2088da668aa1SThomas Huth for (i = 0; i < new_drain_count; i++) { 2089da668aa1SThomas Huth bdrv_drained_begin(new_child_bs); 2090da668aa1SThomas Huth } 2091da668aa1SThomas Huth 2092da668aa1SThomas Huth if (!old_drain_count) { 2093da668aa1SThomas Huth /* 2094da668aa1SThomas Huth * Start a read operation that will yield, so it will not 2095da668aa1SThomas Huth * complete before the node is drained. 2096da668aa1SThomas Huth */ 2097da668aa1SThomas Huth parent_s->yield_before_read = true; 2098da668aa1SThomas Huth io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co, 2099da668aa1SThomas Huth parent_blk); 2100da668aa1SThomas Huth qemu_coroutine_enter(io_co); 2101da668aa1SThomas Huth } 2102da668aa1SThomas Huth 2103da668aa1SThomas Huth /* If we have started a read operation, it should have yielded */ 2104da668aa1SThomas Huth g_assert(!parent_s->has_read); 2105da668aa1SThomas Huth 2106da668aa1SThomas Huth /* Reset drained status so we can see what bdrv_replace_node() does */ 2107da668aa1SThomas Huth parent_s->was_drained = false; 2108da668aa1SThomas Huth parent_s->was_undrained = false; 2109da668aa1SThomas Huth 2110da668aa1SThomas Huth g_assert(parent_bs->quiesce_counter == old_drain_count); 2111da668aa1SThomas Huth bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); 2112da668aa1SThomas Huth g_assert(parent_bs->quiesce_counter == new_drain_count); 2113da668aa1SThomas Huth 2114da668aa1SThomas Huth if (!old_drain_count && !new_drain_count) { 2115da668aa1SThomas Huth /* 2116da668aa1SThomas Huth * From undrained to undrained drains and undrains the parent, 2117da668aa1SThomas Huth * because bdrv_replace_node() contains a drained section for 2118da668aa1SThomas Huth * @old_child_bs. 2119da668aa1SThomas Huth */ 2120da668aa1SThomas Huth g_assert(parent_s->was_drained && parent_s->was_undrained); 2121da668aa1SThomas Huth } else if (!old_drain_count && new_drain_count) { 2122da668aa1SThomas Huth /* 2123da668aa1SThomas Huth * From undrained to drained should drain the parent and keep 2124da668aa1SThomas Huth * it that way. 2125da668aa1SThomas Huth */ 2126da668aa1SThomas Huth g_assert(parent_s->was_drained && !parent_s->was_undrained); 2127da668aa1SThomas Huth } else if (old_drain_count && !new_drain_count) { 2128da668aa1SThomas Huth /* 2129da668aa1SThomas Huth * From drained to undrained should undrain the parent and 2130da668aa1SThomas Huth * keep it that way. 2131da668aa1SThomas Huth */ 2132da668aa1SThomas Huth g_assert(!parent_s->was_drained && parent_s->was_undrained); 2133da668aa1SThomas Huth } else /* if (old_drain_count && new_drain_count) */ { 2134da668aa1SThomas Huth /* 2135da668aa1SThomas Huth * From drained to drained must not undrain the parent at any 2136da668aa1SThomas Huth * point 2137da668aa1SThomas Huth */ 2138da668aa1SThomas Huth g_assert(!parent_s->was_drained && !parent_s->was_undrained); 2139da668aa1SThomas Huth } 2140da668aa1SThomas Huth 2141da668aa1SThomas Huth if (!old_drain_count || !new_drain_count) { 2142da668aa1SThomas Huth /* 2143da668aa1SThomas Huth * If !old_drain_count, we have started a read request before 2144da668aa1SThomas Huth * bdrv_replace_node(). 
If !new_drain_count, the parent must 2145da668aa1SThomas Huth * have been undrained at some point, and 2146da668aa1SThomas Huth * bdrv_replace_test_co_drain_end() starts a read request 2147da668aa1SThomas Huth * then. 2148da668aa1SThomas Huth */ 2149da668aa1SThomas Huth g_assert(parent_s->has_read); 2150da668aa1SThomas Huth } else { 2151da668aa1SThomas Huth /* 2152da668aa1SThomas Huth * If the parent was never undrained, there is no way to start 2153da668aa1SThomas Huth * a read request. 2154da668aa1SThomas Huth */ 2155da668aa1SThomas Huth g_assert(!parent_s->has_read); 2156da668aa1SThomas Huth } 2157da668aa1SThomas Huth 2158da668aa1SThomas Huth /* A drained child must have not received any request */ 2159da668aa1SThomas Huth g_assert(!(old_drain_count && old_child_s->has_read)); 2160da668aa1SThomas Huth g_assert(!(new_drain_count && new_child_s->has_read)); 2161da668aa1SThomas Huth 2162da668aa1SThomas Huth for (i = 0; i < new_drain_count; i++) { 2163da668aa1SThomas Huth bdrv_drained_end(new_child_bs); 2164da668aa1SThomas Huth } 2165da668aa1SThomas Huth for (i = 0; i < old_drain_count; i++) { 2166da668aa1SThomas Huth bdrv_drained_end(old_child_bs); 2167da668aa1SThomas Huth } 2168da668aa1SThomas Huth 2169da668aa1SThomas Huth /* 2170da668aa1SThomas Huth * By now, bdrv_replace_test_co_drain_end() must have been called 2171da668aa1SThomas Huth * at some point while the new child was attached to the parent. 2172da668aa1SThomas Huth */ 2173da668aa1SThomas Huth g_assert(parent_s->has_read); 2174da668aa1SThomas Huth g_assert(new_child_s->has_read); 2175da668aa1SThomas Huth 2176da668aa1SThomas Huth blk_unref(parent_blk); 2177da668aa1SThomas Huth bdrv_unref(parent_bs); 2178da668aa1SThomas Huth bdrv_unref(old_child_bs); 2179da668aa1SThomas Huth bdrv_unref(new_child_bs); 2180da668aa1SThomas Huth } 2181da668aa1SThomas Huth 2182da668aa1SThomas Huth static void test_replace_child_mid_drain(void) 2183da668aa1SThomas Huth { 2184da668aa1SThomas Huth int old_drain_count, new_drain_count; 2185da668aa1SThomas Huth 2186da668aa1SThomas Huth for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) { 2187da668aa1SThomas Huth for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) { 2188da668aa1SThomas Huth do_test_replace_child_mid_drain(old_drain_count, new_drain_count); 2189da668aa1SThomas Huth } 2190da668aa1SThomas Huth } 2191da668aa1SThomas Huth } 2192da668aa1SThomas Huth 2193da668aa1SThomas Huth int main(int argc, char **argv) 2194da668aa1SThomas Huth { 2195da668aa1SThomas Huth int ret; 2196da668aa1SThomas Huth 2197da668aa1SThomas Huth bdrv_init(); 2198da668aa1SThomas Huth qemu_init_main_loop(&error_abort); 2199da668aa1SThomas Huth 2200da668aa1SThomas Huth g_test_init(&argc, &argv, NULL); 2201da668aa1SThomas Huth qemu_event_init(&done_event, false); 2202da668aa1SThomas Huth 2203da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all); 2204da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain); 2205da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/drain_subtree", 2206da668aa1SThomas Huth test_drv_cb_drain_subtree); 2207da668aa1SThomas Huth 2208da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/co/drain_all", 2209da668aa1SThomas Huth test_drv_cb_co_drain_all); 2210da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain); 2211da668aa1SThomas Huth g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree", 2212da668aa1SThomas Huth test_drv_cb_co_drain_subtree); 
2213da668aa1SThomas Huth 2214da668aa1SThomas Huth 2215da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all); 2216da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain); 2217da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/drain_subtree", 2218da668aa1SThomas Huth test_quiesce_drain_subtree); 2219da668aa1SThomas Huth 2220da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/co/drain_all", 2221da668aa1SThomas Huth test_quiesce_co_drain_all); 2222da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain); 2223da668aa1SThomas Huth g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree", 2224da668aa1SThomas Huth test_quiesce_co_drain_subtree); 2225da668aa1SThomas Huth 2226da668aa1SThomas Huth g_test_add_func("/bdrv-drain/nested", test_nested); 2227da668aa1SThomas Huth g_test_add_func("/bdrv-drain/multiparent", test_multiparent); 2228da668aa1SThomas Huth 2229da668aa1SThomas Huth g_test_add_func("/bdrv-drain/graph-change/drain_subtree", 2230da668aa1SThomas Huth test_graph_change_drain_subtree); 2231da668aa1SThomas Huth g_test_add_func("/bdrv-drain/graph-change/drain_all", 2232da668aa1SThomas Huth test_graph_change_drain_all); 2233da668aa1SThomas Huth 2234da668aa1SThomas Huth g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all); 2235da668aa1SThomas Huth g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain); 2236da668aa1SThomas Huth g_test_add_func("/bdrv-drain/iothread/drain_subtree", 2237da668aa1SThomas Huth test_iothread_drain_subtree); 2238da668aa1SThomas Huth 2239da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all); 2240da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain); 2241da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/drain_subtree", 2242da668aa1SThomas Huth test_blockjob_drain_subtree); 2243da668aa1SThomas Huth 2244da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/error/drain_all", 2245da668aa1SThomas Huth test_blockjob_error_drain_all); 2246da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/error/drain", 2247da668aa1SThomas Huth test_blockjob_error_drain); 2248da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree", 2249da668aa1SThomas Huth test_blockjob_error_drain_subtree); 2250da668aa1SThomas Huth 2251da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all", 2252da668aa1SThomas Huth test_blockjob_iothread_drain_all); 2253da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/drain", 2254da668aa1SThomas Huth test_blockjob_iothread_drain); 2255da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree", 2256da668aa1SThomas Huth test_blockjob_iothread_drain_subtree); 2257da668aa1SThomas Huth 2258da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all", 2259da668aa1SThomas Huth test_blockjob_iothread_error_drain_all); 2260da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain", 2261da668aa1SThomas Huth test_blockjob_iothread_error_drain); 2262da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree", 2263da668aa1SThomas Huth test_blockjob_iothread_error_drain_subtree); 2264da668aa1SThomas Huth 2265da668aa1SThomas Huth g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain); 2266da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/drain_all", 
test_detach_by_drain_all); 2267da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); 2268da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree); 2269da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb); 2270da668aa1SThomas Huth g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb); 2271da668aa1SThomas Huth 2272da668aa1SThomas Huth g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained); 2273da668aa1SThomas Huth 2274da668aa1SThomas Huth g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context); 2275da668aa1SThomas Huth 2276da668aa1SThomas Huth g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end", 2277da668aa1SThomas Huth test_blockjob_commit_by_drained_end); 2278da668aa1SThomas Huth 2279da668aa1SThomas Huth g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll", 2280da668aa1SThomas Huth test_drop_intermediate_poll); 2281da668aa1SThomas Huth 2282da668aa1SThomas Huth g_test_add_func("/bdrv-drain/replace_child/mid-drain", 2283da668aa1SThomas Huth test_replace_child_mid_drain); 2284da668aa1SThomas Huth 2285da668aa1SThomas Huth ret = g_test_run(); 2286da668aa1SThomas Huth qemu_event_destroy(&done_event); 2287da668aa1SThomas Huth return ret; 2288da668aa1SThomas Huth } 2289