/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/block_int-global-state.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qemu/main-loop.h"
#include "iothread.h"

/*
 * Minimal "test" block driver used by all tests below: every callback
 * succeeds without doing any actual I/O, so the tests exercise only the
 * generic block-layer request paths and AioContext handling.
 */

/* No-op read: always reports success without touching qiov. */
static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    return 0;
}

/* No-op write: always reports success. */
static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             QEMUIOVector *qiov,
                                             BdrvRequestFlags flags)
{
    return 0;
}

/* No-op discard: always reports success. */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes)
{
    return 0;
}

/* No-op truncate: always reports success (does not change total_sectors). */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}

/*
 * Block-status callback: claims the whole queried range in one go
 * (*pnum = count) and returns 0, i.e. no BDRV_BLOCK_* flags set.
 */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_preadv,
    .bdrv_co_pwritev        = bdrv_test_co_pwritev,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};

/*
 * The test_sync_op_* functions below each exercise one synchronous
 * block-layer wrapper while the node lives in an iothread AioContext
 * (the caller, test_sync_op(), has already moved it there).  Each checks
 * a success path and/or an early-error path (e.g. negative offset).
 */

/* bdrv_pread(): success and early -EIO on negative offset. */
static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* bdrv_pwrite(): success and early -EIO on negative offset. */
static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* blk_pread(): same checks as test_sync_op_pread(), via BlockBackend. */
static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* blk_pwrite(): same checks as test_sync_op_pwrite(), via BlockBackend. */
static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* blk_preadv(): vectored read, success and negative-offset error. */
static void test_sync_op_blk_preadv(BlockBackend *blk)
{
    uint8_t buf[512];
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* blk_pwritev(): vectored write, success and negative-offset error. */
static void test_sync_op_blk_pwritev(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* blk_preadv_part(): partial-qiov read variant (qiov_offset = 0 here). */
static void test_sync_op_blk_preadv_part(BlockBackend *blk)
{
    uint8_t buf[512];
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* blk_pwritev_part(): partial-qiov write variant (qiov_offset = 0 here). */
static void test_sync_op_blk_pwritev_part(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/*
 * blk_pwrite_compressed(): the test driver has no compressed-write
 * callback, so the in-driver path fails with -ENOTSUP ("late" error,
 * after request submission); a negative offset still fails early.
 */
static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Late error: Not supported */
    ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf);
    g_assert_cmpint(ret, ==, -ENOTSUP);

    /* Early error: Negative offset */
    ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf);
    g_assert_cmpint(ret, ==, -EIO);
}

/* blk_pwrite_zeroes(): success and early -EIO on negative offset. */
static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk)
{
    int ret;

    /* Success */
    ret = blk_pwrite_zeroes(blk, 0, 512, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwrite_zeroes(blk, -2, 512, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

/* bdrv_load_vmstate(): the test driver has no snapshot support. */
static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

/* bdrv_save_vmstate(): the test driver has no snapshot support. */
static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

/*
 * bdrv_pdiscard(): full path with BDRV_O_UNMAP set, early-success path
 * with it cleared (discard is silently ignored), and negative-offset error.
 * Note that this toggles open_flags on the shared node.
 */
static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

/*
 * blk_pdiscard(): runs after test_sync_op_pdiscard() in the same test,
 * which left BDRV_O_UNMAP cleared, so only the early-success path and
 * the negative-offset error are covered here.
 */
static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

/*
 * bdrv_truncate(): success, -EINVAL on negative size, and -EACCES on a
 * read-only node (BDRV_O_RDWR is restored before returning).
 */
static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->open_flags |= BDRV_O_RDWR;
}

/* blk_truncate(): success and -EINVAL on negative size. */
static void test_sync_op_blk_truncate(BlockBackend *blk)
{
    int ret;

    /* Normal success path */
    ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);
}

/* Disable TSA to make bdrv_test.bdrv_co_block_status writable */
static void TSA_NO_TSA test_sync_op_block_status(BdrvChild *c)
{
    int ret;
    int64_t n;

    /* Normal success path */
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 0);

    /*
     * Early success: No driver support.
     * NOTE(review): bdrv_co_block_status is set to NULL here and never
     * restored, so any later user of bdrv_test sees the modified driver —
     * confirm this ordering dependency is intended.
     */
    bdrv_test.bdrv_co_block_status = NULL;
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 1);

    /* Early success: bytes = 0 */
    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Offset > image size */
    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    g_assert_cmpint(ret, ==, 0);
}

/*
 * bdrv_flush(): success on a writable node and early success on a
 * read-only node (BDRV_O_RDWR is restored before returning).
 */
static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->open_flags |= BDRV_O_RDWR;
}

/* blk_flush(): same checks as test_sync_op_flush(), via BlockBackend. */
static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->open_flags |= BDRV_O_RDWR;
}

/* bdrv_check(): the test driver implements no check callback. */
static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

/* bdrv_activate(): no-op on an already-active image. */
static void test_sync_op_activate(BdrvChild *c)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /* Early success: Image is not inactive */
    bdrv_activate(c->bs, NULL);
}


/*
 * One synchronous-operation test case: a g_test name plus an optional
 * BdrvChild-level function and an optional BlockBackend-level function
 * (either may be NULL; test_sync_op() runs whichever are set).
 */
typedef struct SyncOpTest {
    const char *name;
    void (*fn)(BdrvChild *c);
    void (*blkfn)(BlockBackend *blk);
} SyncOpTest;

const SyncOpTest sync_op_tests[] = {
    {
        .name   = "/sync-op/pread",
        .fn     = test_sync_op_pread,
        .blkfn  = test_sync_op_blk_pread,
    }, {
        .name   = "/sync-op/pwrite",
        .fn     = test_sync_op_pwrite,
        .blkfn  = test_sync_op_blk_pwrite,
    }, {
        .name   = "/sync-op/preadv",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_preadv,
    }, {
        .name   = "/sync-op/pwritev",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwritev,
    }, {
        .name   = "/sync-op/preadv_part",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_preadv_part,
    }, {
        .name   = "/sync-op/pwritev_part",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwritev_part,
    }, {
        .name   = "/sync-op/pwrite_compressed",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwrite_compressed,
    }, {
        .name   = "/sync-op/pwrite_zeroes",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwrite_zeroes,
    }, {
        .name   = "/sync-op/load_vmstate",
        .fn     = test_sync_op_load_vmstate,
    }, {
        .name   = "/sync-op/save_vmstate",
        .fn     = test_sync_op_save_vmstate,
    }, {
        .name   = "/sync-op/pdiscard",
        .fn     = test_sync_op_pdiscard,
        .blkfn  = test_sync_op_blk_pdiscard,
    }, {
        .name   = "/sync-op/truncate",
        .fn     = test_sync_op_truncate,
        .blkfn  = test_sync_op_blk_truncate,
    }, {
        .name   = "/sync-op/block_status",
        .fn     = test_sync_op_block_status,
    }, {
        .name   = "/sync-op/flush",
        .fn     = test_sync_op_flush,
        .blkfn  = test_sync_op_blk_flush,
    }, {
        .name   = "/sync-op/check",
        .fn     = test_sync_op_check,
    }, {
        .name   = "/sync-op/activate",
        .fn     = test_sync_op_activate,
    },
};

/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    GLOBAL_STATE_CODE();

    /* Build blk -> bs ("test" driver) in the main context first */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);

    /* The only parent of bs is the BdrvChild belonging to blk */
    bdrv_graph_rdlock_main_loop();
    c = QLIST_FIRST(&bs->parents);
    bdrv_graph_rdunlock_main_loop();

    /* Move to the iothread, run the test functions, move back */
    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    if (t->fn) {
        t->fn(c);
    }
    if (t->blkfn) {
        t->blkfn(blk);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}

/*
 * Block job used by test_attach_blockjob(): spins in its run loop,
 * incrementing n on each iteration, until should_complete is set.
 */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;   /* set by .complete to stop the run loop */
    int n;                  /* iteration counter, polled by the test */
} TestBlockJob;

/* .prepare always runs in the main loop context, regardless of the job's. */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}

/* .complete: just tell the run loop to finish. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
    },
};

/*
 * Test that a block job keeps running (and its coroutine keeps asserting
 * the right AioContext) while its node is moved between the main context
 * and an iothread several times.  After each switch we reset n and poll
 * until the job has made progress in the new context.
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Job runs in the main context: wait for first progress */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Switch to the iothread; the job must follow and keep making progress */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And back to the main context */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And to the iothread once more before completing the job there */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    WITH_JOB_LOCK_GUARD() {
        job_complete_sync_locked(&tjob->common.job, &error_abort);
    }
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}

/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]     bs_b [bdrv_test]
 *
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend. We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}

/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *      /                \
 *   bs_b [raw]         bs_c [raw]
 *      \                /
 *       \              /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}

/*
 * Test AioContext propagation through a mirror job: changing the context
 * of src must move target, the filter node and the job itself; a
 * BlockBackend on src blocks the change unless it explicitly allows
 * AioContext changes.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target */
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    aio_context_acquire(main_ctx);
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    aio_context_release(main_ctx);

    WITH_JOB_LOCK_GUARD() {
        job = job_get_locked("job0");
    }
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src */
    bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target */
    aio_context_acquire(ctx);
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
    error_free_or_abort(&local_err);

    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    /* Move everything back to the main context before tearing down */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}

/*
 * Test that a node opened as a second parent (a raw filter on top of
 * "base") inherits the iothread AioContext that blk already assigned to
 * the tree, and follows the switch back to the main context.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    aio_context_acquire(main_ctx);
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    aio_context_release(main_ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}

/*
 * Test that a BlockBackend keeps its assigned iothread AioContext across
 * blk_remove_bs()/blk_insert_bs(): the removed node falls back to the
 * main context, but blk does not, and re-attaching moves the node to
 * blk's context again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs;

    aio_context_acquire(main_ctx);
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    aio_context_release(main_ctx);

    /* Remove the node again */
    aio_context_acquire(ctx);
    blk_remove_bs(blk);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    aio_context_acquire(main_ctx);
    blk_insert_bs(blk, bs, &error_abort);
    aio_context_release(main_ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);
    bdrv_unref(bs);
    blk_unref(blk);
}

/* Register all sync-op table entries plus the standalone tests, then run. */
int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}