/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/block_int-global-state.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qemu/main-loop.h"
#include "iothread.h"

static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             QEMUIOVector *qiov,
                                             BdrvRequestFlags flags)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes)
{
    return 0;
}

static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}

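/* Dummy test block driver: every request callback succeeds immediately
 * without performing any actual I/O. */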
static BlockDriver bdrv_test = {
    .format_name = "test",
    .instance_size = 1,

    .bdrv_co_preadv = bdrv_test_co_preadv,
    .bdrv_co_pwritev = bdrv_test_co_pwritev,
    .bdrv_co_pdiscard = bdrv_test_co_pdiscard,
    .bdrv_co_truncate = bdrv_test_co_truncate,
    .bdrv_co_block_status = bdrv_test_co_block_status,
};

static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_preadv(BlockBackend *blk)
{
    uint8_t buf[512];
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwritev(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_preadv_part(BlockBackend *blk)
{
    uint8_t buf[512];
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwritev_part(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Late error: Not supported */
    ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf);
    g_assert_cmpint(ret, ==, -ENOTSUP);

    /* Early error: Negative offset */
    ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk)
{
    int ret;

    /* Success */
    ret = blk_pwrite_zeroes(blk, 0, 512, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwrite_zeroes(blk, -2, 512, 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_blk_truncate(BlockBackend *blk)
{
    int ret;

    /* Normal success path */
    ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);
}

/* Disable TSA to make bdrv_test.bdrv_co_block_status writable */
static void TSA_NO_TSA test_sync_op_block_status(BdrvChild *c)
{
    int ret;
    int64_t n;

    /* Normal success path */
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: No driver support */
    bdrv_test.bdrv_co_block_status = NULL;
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 1);

    /* Early success: bytes = 0 */
    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Offset > image size */
    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    g_assert_cmpint(ret, ==, 0);
}

static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_activate(BdrvChild *c)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /* Early success: Image is not inactive */
    bdrv_activate(c->bs, NULL);
}


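/* Table of synchronous operation tests: each entry provides the test name
 * plus the BdrvChild-level and/or BlockBackend-level variant of the test. */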
typedef struct SyncOpTest {
    const char *name;
    void (*fn)(BdrvChild *c);
    void (*blkfn)(BlockBackend *blk);
} SyncOpTest;

const SyncOpTest sync_op_tests[] = {
    {
        .name   = "/sync-op/pread",
        .fn     = test_sync_op_pread,
        .blkfn  = test_sync_op_blk_pread,
    }, {
        .name   = "/sync-op/pwrite",
        .fn     = test_sync_op_pwrite,
        .blkfn  = test_sync_op_blk_pwrite,
    }, {
        .name   = "/sync-op/preadv",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_preadv,
    }, {
        .name   = "/sync-op/pwritev",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwritev,
    }, {
        .name   = "/sync-op/preadv_part",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_preadv_part,
    }, {
        .name   = "/sync-op/pwritev_part",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwritev_part,
    }, {
        .name   = "/sync-op/pwrite_compressed",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwrite_compressed,
    }, {
        .name   = "/sync-op/pwrite_zeroes",
        .fn     = NULL,
        .blkfn  = test_sync_op_blk_pwrite_zeroes,
    }, {
        .name   = "/sync-op/load_vmstate",
        .fn     = test_sync_op_load_vmstate,
    }, {
        .name   = "/sync-op/save_vmstate",
        .fn     = test_sync_op_save_vmstate,
    }, {
        .name   = "/sync-op/pdiscard",
        .fn     = test_sync_op_pdiscard,
        .blkfn  = test_sync_op_blk_pdiscard,
    }, {
        .name   = "/sync-op/truncate",
        .fn     = test_sync_op_truncate,
        .blkfn  = test_sync_op_blk_truncate,
    }, {
        .name   = "/sync-op/block_status",
        .fn     = test_sync_op_block_status,
    }, {
        .name   = "/sync-op/flush",
        .fn     = test_sync_op_flush,
        .blkfn  = test_sync_op_blk_flush,
    }, {
        .name   = "/sync-op/check",
        .fn     = test_sync_op_check,
    }, {
        .name   = "/sync-op/activate",
        .fn     = test_sync_op_activate,
    },
};

/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    GLOBAL_STATE_CODE();

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);

    bdrv_graph_rdlock_main_loop();
    c = QLIST_FIRST(&bs->parents);
    bdrv_graph_rdunlock_main_loop();

    blk_set_aio_context(blk, ctx, &error_abort);
    if (t->fn) {
        t->fn(c);
    }
    if (t->blkfn) {
        t->blkfn(blk);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);
}

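/* A minimal block job that counts iterations in 'n' until its completion
 * handler sets 'should_complete'. */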
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;
    int n;
} TestBlockJob;

static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
    },
};

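/* Test that a block job keeps running while its BlockBackend is moved back
 * and forth between the main loop and an iothread AioContext. */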
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

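    /* Wait for the job to make progress; repeated below after each switch */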
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    WITH_JOB_LOCK_GUARD() {
        job_complete_sync_locked(&tjob->common.job, &error_abort);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);
}

/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]    bs_b [bdrv_test]
 *
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend. We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}

/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *      /                \
 *   bs_b [raw]       bs_c [raw]
 *      \                /
 *       \              /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}

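/* Test that AioContext changes on a mirror job's source propagate to the
 * target and the filter node, and that an attached BlockBackend can veto
 * or allow the change. */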
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target */
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);

    WITH_JOB_LOCK_GUARD() {
        job = job_get_locked("job0");
    }
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src */
    bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target */
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
    error_free_or_abort(&local_err);

    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}

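/* Test that a node opened while its parent lives in an iothread inherits
 * that AioContext, and follows later context switches. */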
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}

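/* Test that a BlockBackend keeps its iothread AioContext across
 * blk_remove_bs()/blk_insert_bs(), while a detached node falls back to the
 * main loop context. */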
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    blk_remove_bs(blk);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    bdrv_unref(bs);
    blk_unref(blk);
}

int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}