/*
 * Blockjob tests
 *
 * Copyright Igalia, S.L. 2016
 *
 * Authors:
 *  Alberto Garcia <berto@igalia.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "iothread.h"

/*
 * Minimal job driver: no .run or .complete callbacks, just enough for
 * block_job_create() to succeed.  Used by the job-ID tests, whose jobs
 * are never started.
 */
static const BlockJobDriver test_block_job_driver = {
    .job_driver = {
        .instance_size = sizeof(BlockJob),
        .free = block_job_free,
        .user_resume = block_job_user_resume,
    },
};

/*
 * Completion callback passed to block_job_create(); these tests never
 * inspect the job's result, so it does nothing.
 */
static void block_job_cb(void *opaque, int ret)
{
}

/*
 * Create a block job with the given @id and driver @drv on @blk's BDS.
 *
 * If @should_succeed is true, assert that creation worked and that the
 * job got the expected ID (@id, or the backend's name when @id is NULL).
 * Otherwise, assert that creation failed with an error and returned NULL.
 *
 * @flags: JOB_DEFAULT or a combination of JOB_MANUAL_* flags.
 *
 * Returns: the new job, or NULL on (expected) failure.
 */
static BlockJob *mk_job(BlockBackend *blk, const char *id,
                        const BlockJobDriver *drv, bool should_succeed,
                        int flags)
{
    BlockJob *job;
    Error *err = NULL;

    job = block_job_create(id, drv, NULL, blk_bs(blk),
                           0, BLK_PERM_ALL, 0, flags, block_job_cb,
                           NULL, &err);
    if (should_succeed) {
        g_assert_null(err);
        g_assert_nonnull(job);
        if (id) {
            g_assert_cmpstr(job->job.id, ==, id);
        } else {
            /* With no explicit ID, the job ID defaults to the BB's name */
            g_assert_cmpstr(job->job.id, ==, blk_name(blk));
        }
    } else {
        error_free_or_abort(&err);
        g_assert_null(job);
    }

    return job;
}

/*
 * Convenience wrapper around mk_job() using the no-op driver and the
 * default job flags.
 */
static BlockJob *do_test_id(BlockBackend *blk, const char *id,
                            bool should_succeed)
{
    return mk_job(blk, id, &test_block_job_driver,
                  should_succeed, JOB_DEFAULT);
}

/* This creates a BlockBackend (optionally with a name) with a
 * BlockDriverState inserted. */
static BlockBackend *create_blk(const char *name)
{
    /* No I/O is performed on this device */
    BlockBackend *blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    BlockDriverState *bs;

    QDict *opt = qdict_new();
    qdict_put_str(opt, "file.read-zeroes", "on");
    bs = bdrv_open("null-co://", NULL, opt, 0, &error_abort);
    g_assert_nonnull(bs);

    blk_insert_bs(blk, bs, &error_abort);
    /* blk now holds the reference; drop the one from bdrv_open() */
    bdrv_unref(bs);

    if (name) {
        Error *err = NULL;
        monitor_add_blk(blk, name, &err);
        g_assert_null(err);
    }

    return blk;
}

/* This destroys the backend */
static void destroy_blk(BlockBackend *blk)
{
    /* Only named backends were registered with the monitor in create_blk() */
    if (blk_name(blk)[0] != '\0') {
        monitor_remove_blk(blk);
    }

    blk_remove_bs(blk);
    blk_unref(blk);
}

/*
 * Exercise the job-ID rules: rejection of invalid or duplicate IDs,
 * defaulting to the backend name when no ID is given, and reuse of an
 * ID after the job that held it has gone away.
 */
static void test_job_ids(void)
{
    BlockBackend *blk[3];
    BlockJob *job[3];

    blk[0] = create_blk(NULL);
    blk[1] = create_blk("drive1");
    blk[2] = create_blk("drive2");

    /* No job ID provided and the block backend has no name */
    job[0] = do_test_id(blk[0], NULL, false);

    /* These are all invalid job IDs */
    job[0] = do_test_id(blk[0], "0id", false);
    job[0] = do_test_id(blk[0], "", false);
    job[0] = do_test_id(blk[0], " ", false);
    job[0] = do_test_id(blk[0], "123", false);
    job[0] = do_test_id(blk[0], "_id", false);
    job[0] = do_test_id(blk[0], "-id", false);
    job[0] = do_test_id(blk[0], ".id", false);
    job[0] = do_test_id(blk[0], "#id", false);

    /* This one is valid */
    job[0] = do_test_id(blk[0], "id0", true);

    /* We can have two jobs in the same BDS */
    job[1] = do_test_id(blk[0], "id1", true);
    job_early_fail(&job[1]->job);

    /* Duplicate job IDs are not allowed */
    job[1] = do_test_id(blk[1], "id0", false);

    /* But once job[0] finishes we can reuse its ID */
    job_early_fail(&job[0]->job);
    job[1] = do_test_id(blk[1], "id0", true);

    /* No job ID specified, defaults to the backend name ('drive1') */
    job_early_fail(&job[1]->job);
    job[1] = do_test_id(blk[1], NULL, true);

    /* Duplicate job ID */
    job[2] = do_test_id(blk[2], "drive1", false);

    /* The ID of job[2] would default to 'drive2' but it is already in use */
    job[0] = do_test_id(blk[0], "drive2", true);
    job[2] = do_test_id(blk[2], NULL, false);

    /* This one is valid */
    job[2] = do_test_id(blk[2], "id_2", true);

    job_early_fail(&job[0]->job);
    job_early_fail(&job[1]->job);
    job_early_fail(&job[2]->job);

    destroy_blk(blk[0]);
    destroy_blk(blk[1]);
    destroy_blk(blk[2]);
}

/*
 * Job used by the cancellation tests: its run() loop idles in short
 * sleeps until it is cancelled or asked to complete, optionally
 * transitioning to READY first.
 */
typedef struct CancelJob {
    BlockJob common;
    BlockBackend *blk;
    bool should_converge;   /* when set, run() transitions the job to READY */
    bool should_complete;   /* when set, run() returns successfully */
} CancelJob;

/* .complete callback: tells the run() loop to finish on its next pass */
static void cancel_job_complete(Job *job, Error **errp)
{
    CancelJob *s = container_of(job, CancelJob, common.job);
    s->should_complete = true;
}

static int coroutine_fn cancel_job_run(Job *job, Error **errp)
{
    CancelJob *s = container_of(job, CancelJob, common.job);

    while (!s->should_complete) {
        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        if (!job_is_ready(&s->common.job) && s->should_converge) {
            job_transition_to_ready(&s->common.job);
        }

        /* Sleep 100 us per iteration so pause/cancel requests are seen */
        job_sleep_ns(&s->common.job, 100000);
    }

    return 0;
}

static const BlockJobDriver test_cancel_driver = {
    .job_driver = {
        .instance_size = sizeof(CancelJob),
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = cancel_job_run,
        .complete = cancel_job_complete,
    },
};

/*
 * Create a fresh backend plus a CancelJob on it (not yet started).
 *
 * The job uses JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS so each test
 * controls the finalize/dismiss transitions itself, and an extra
 * reference is taken so cancel_common() can still inspect the job
 * after it has been dismissed.
 *
 * @pjob: output parameter receiving the embedded Job pointer.
 * Returns: the CancelJob (caller passes it to cancel_common()).
 */
static CancelJob *create_common(Job **pjob)
{
    BlockBackend *blk;
    Job *job;
    BlockJob *bjob;
    CancelJob *s;

    blk = create_blk(NULL);
    bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
    job = &bjob->job;
    WITH_JOB_LOCK_GUARD() {
        job_ref_locked(job);
        assert(job->status == JOB_STATUS_CREATED);
    }

    s = container_of(bjob, CancelJob, common);
    s->blk = blk;

    *pjob = job;
    return s;
}

/*
 * Common teardown for the cancellation tests: force-cancel the job
 * from whatever state the test left it in, dismiss it if necessary,
 * then drop the extra reference and destroy the backend.
 */
static void cancel_common(CancelJob *s)
{
    BlockJob *job = &s->common;
    BlockBackend *blk = s->blk;
    JobStatus sts = job->job.status;
    AioContext *ctx;

    /* Hold the job's AioContext across the cancel/dismiss sequence */
    ctx = job->job.aio_context;
    aio_context_acquire(ctx);

    job_cancel_sync(&job->job, true);
    WITH_JOB_LOCK_GUARD() {
        if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
            /*
             * Jobs cancelled from CREATED (or already CONCLUDED) reach
             * NULL on their own; otherwise JOB_MANUAL_DISMISS leaves the
             * job at CONCLUDED and we must dismiss it explicitly.
             * job_dismiss_locked() NULLs its argument, so use a dummy to
             * keep our own pointer valid.
             */
            Job *dummy = &job->job;
            job_dismiss_locked(&dummy, &error_abort);
        }
        assert(job->job.status == JOB_STATUS_NULL);
        /* Drop the reference taken in create_common() */
        job_unref_locked(&job->job);
    }
    destroy_blk(blk);

    aio_context_release(ctx);
}

/* Cancelling a job that was created but never started */
static void test_cancel_created(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);
    cancel_common(s);
}

/* Check job->status while holding the job lock */
static void assert_job_status_is(Job *job, int status)
{
    WITH_JOB_LOCK_GUARD() {
        assert(job->status == status);
    }
}

/* Cancelling a job in the RUNNING state */
static void test_cancel_running(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert_job_status_is(job, JOB_STATUS_RUNNING);

    cancel_common(s);
}

/* Cancelling a job that has been user-paused (PAUSED state) */
static void test_cancel_paused(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    WITH_JOB_LOCK_GUARD() {
        assert(job->status == JOB_STATUS_RUNNING);
        job_user_pause_locked(job, &error_abort);
    }
    /* Enter the job so it notices the pause request */
    job_enter(job);
    assert_job_status_is(job, JOB_STATUS_PAUSED);

    cancel_common(s);
}

/* Cancelling a job that has converged to the READY state */
static void test_cancel_ready(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert_job_status_is(job, JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert_job_status_is(job, JOB_STATUS_READY);

    cancel_common(s);
}

/* Cancelling a job that was paused while READY (STANDBY state) */
static void test_cancel_standby(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert_job_status_is(job, JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    WITH_JOB_LOCK_GUARD() {
        assert(job->status == JOB_STATUS_READY);
        job_user_pause_locked(job, &error_abort);
    }
    job_enter(job);
    assert_job_status_is(job, JOB_STATUS_STANDBY);

    cancel_common(s);
}

/* Cancelling a job that has completed but not been finalized (PENDING) */
static void test_cancel_pending(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert_job_status_is(job, JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    WITH_JOB_LOCK_GUARD() {
        assert(job->status == JOB_STATUS_READY);
        job_complete_locked(job, &error_abort);
    }
    job_enter(job);
    /* Wait until run() has returned and deferred the rest to the main loop */
    while (!job->deferred_to_main_loop) {
        aio_poll(qemu_get_aio_context(), true);
    }
    assert_job_status_is(job, JOB_STATUS_READY);
    /* Run the main-loop BH; JOB_MANUAL_FINALIZE keeps the job at PENDING */
    aio_poll(qemu_get_aio_context(), true);
    assert_job_status_is(job, JOB_STATUS_PENDING);

    cancel_common(s);
}

/* Cancelling a job that has been explicitly finalized (CONCLUDED) */
static void test_cancel_concluded(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert_job_status_is(job, JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    WITH_JOB_LOCK_GUARD() {
        assert(job->status == JOB_STATUS_READY);
        job_complete_locked(job, &error_abort);
    }
    job_enter(job);
    /* Wait until run() has returned and deferred the rest to the main loop */
    while (!job->deferred_to_main_loop) {
        aio_poll(qemu_get_aio_context(), true);
    }
    assert_job_status_is(job, JOB_STATUS_READY);
    aio_poll(qemu_get_aio_context(), true);
    assert_job_status_is(job, JOB_STATUS_PENDING);

    /* Finalize manually (JOB_MANUAL_FINALIZE), moving PENDING -> CONCLUDED */
    aio_context_acquire(job->aio_context);
    WITH_JOB_LOCK_GUARD() {
        job_finalize_locked(job, &error_abort);
    }
    aio_context_release(job->aio_context);
    assert_job_status_is(job, JOB_STATUS_CONCLUDED);

    cancel_common(s);
}
/* (See test_yielding_driver for the job description) */
typedef struct YieldingJob {
    BlockJob common;
    bool should_complete;   /* when set, run() stops yielding and returns */
} YieldingJob;

/* .complete callback: flag completion and wake the yielding coroutine */
static void yielding_job_complete(Job *job, Error **errp)
{
    YieldingJob *s = container_of(job, YieldingJob, common.job);
    s->should_complete = true;
    job_enter(job);
}

static int coroutine_fn yielding_job_run(Job *job, Error **errp)
{
    YieldingJob *s = container_of(job, YieldingJob, common.job);

    job_transition_to_ready(job);

    while (!s->should_complete) {
        job_yield(job);
    }

    return 0;
}

/*
 * This job transitions immediately to the READY state, and then
 * yields until it is to complete.
 */
static const BlockJobDriver test_yielding_driver = {
    .job_driver = {
        .instance_size = sizeof(YieldingJob),
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = yielding_job_run,
        .complete = yielding_job_complete,
    },
};

/*
 * Test that job_complete() works even on jobs that are in a paused
 * state (i.e., STANDBY).
 *
 * To do this, run YieldingJob in an IO thread, get it into the READY
 * state, then have a drained section.  Before ending the section,
 * acquire the context so the job will not be entered and will thus
 * remain on STANDBY.
 *
 * job_complete() should still work without error.
 *
 * Note that on the QMP interface, it is impossible to lock an IO
 * thread before a drained section ends.  In practice, the
 * bdrv_drain_all_end() and the aio_context_acquire() will be
 * reversed.  However, that makes for worse reproducibility here:
 * Sometimes, the job would no longer be in STANDBY then but already
 * be started.  We cannot prevent that, because the IO thread runs
 * concurrently.  We can only prevent it by taking the lock before
 * ending the drained section, so we do that.
 *
 * (You can reverse the order of operations and most of the time the
 * test will pass, but sometimes the assert(status == STANDBY) will
 * fail.)
 */
static void test_complete_in_standby(void)
{
    BlockBackend *blk;
    IOThread *iothread;
    AioContext *ctx;
    Job *job;
    BlockJob *bjob;

    /* Create a test drive, move it to an IO thread */
    blk = create_blk(NULL);
    iothread = iothread_new();

    ctx = iothread_get_aio_context(iothread);
    blk_set_aio_context(blk, ctx, &error_abort);

    /* Create our test job */
    bjob = mk_job(blk, "job", &test_yielding_driver, true,
                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
    job = &bjob->job;
    assert_job_status_is(job, JOB_STATUS_CREATED);

    /* Wait for the job to become READY */
    job_start(job);
    aio_context_acquire(ctx);
    /*
     * Here we are waiting for the status to change, so don't bother
     * protecting the read every time.
     */
    AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
    aio_context_release(ctx);

    /* Begin the drained section, pausing the job */
    bdrv_drain_all_begin();
    assert_job_status_is(job, JOB_STATUS_STANDBY);

    /* Lock the IO thread to prevent the job from being run */
    aio_context_acquire(ctx);
    /* This will schedule the job to resume it */
    bdrv_drain_all_end();

    WITH_JOB_LOCK_GUARD() {
        /* But the job cannot run, so it will remain on standby */
        assert(job->status == JOB_STATUS_STANDBY);

        /* Even though the job is on standby, this should work */
        job_complete_locked(job, &error_abort);

        /* The test is done now, clean up. */
        job_finish_sync_locked(job, NULL, &error_abort);
        assert(job->status == JOB_STATUS_PENDING);

        job_finalize_locked(job, &error_abort);
        assert(job->status == JOB_STATUS_CONCLUDED);

        job_dismiss_locked(&job, &error_abort);
    }

    destroy_blk(blk);
    aio_context_release(ctx);
    iothread_join(iothread);
}

/* Test entry point: register the blockjob test cases and run them */
int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_abort);
    bdrv_init();

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/blockjob/ids", test_job_ids);
    g_test_add_func("/blockjob/cancel/created", test_cancel_created);
    g_test_add_func("/blockjob/cancel/running", test_cancel_running);
    g_test_add_func("/blockjob/cancel/paused", test_cancel_paused);
    g_test_add_func("/blockjob/cancel/ready", test_cancel_ready);
    g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
    g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
    g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
    g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
    return g_test_run();
}