/*
 * Background jobs (long-running operations)
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012, 2018 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/job.h"
#include "qemu/id.h"
#include "qemu/main-loop.h"
#include "block/aio-wait.h"
#include "trace/trace-root.h"
#include "qapi/qapi-events-job.h"

static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);

/* Job State Transition Table */
bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
    /* R: */ [JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
    /* P: */ [JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
    /* Y: */ [JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
    /* S: */ [JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* W: */ [JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
    /* D: */ [JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* X: */ [JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};

bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    [JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
    [JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    [JOB_VERB_FINALIZE]             = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
    [JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
};
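
/*
 * Reading the tables: JobSTT[s0][s1] is nonzero iff a transition from status
 * s0 to status s1 is allowed (see job_state_transition()), and
 * JobVerbTable[verb][s] is nonzero iff @verb is accepted while the job is in
 * status @s (see job_apply_verb()). For example, CREATED -> RUNNING is a
 * legal transition, and JOB_VERB_DISMISS is only accepted in CONCLUDED.
 */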

/* Transactional group of jobs */
struct JobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, Job) jobs;

    /* Reference count */
    int refcnt;
};

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to job_do_yield and
 * job_enter. */
static QemuMutex job_mutex;

static void job_lock(void)
{
    qemu_mutex_lock(&job_mutex);
}

static void job_unlock(void)
{
    qemu_mutex_unlock(&job_mutex);
}

static void __attribute__((__constructor__)) job_init(void)
{
    qemu_mutex_init(&job_mutex);
}

JobTxn *job_txn_new(void)
{
    JobTxn *txn = g_new0(JobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void job_txn_ref(JobTxn *txn)
{
    txn->refcnt++;
}

void job_txn_unref(JobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void job_txn_add_job(JobTxn *txn, Job *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    job_txn_ref(txn);
}

static void job_txn_del_job(Job *job)
{
    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        job_txn_unref(job->txn);
        job->txn = NULL;
    }
}

static int job_txn_apply(Job *job, int fn(Job *))
{
    AioContext *inner_ctx;
    Job *other_job, *next;
    JobTxn *txn = job->txn;
    int rc = 0;

    /*
     * Similar to job_completed_txn_abort, we take each job's lock before
     * applying fn, but since we assume that outer_ctx is held by the caller,
     * we need to release it here to avoid holding the lock twice - which would
     * break AIO_WAIT_WHILE from within fn.
     */
    job_ref(job);
    aio_context_release(job->aio_context);

    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        inner_ctx = other_job->aio_context;
        aio_context_acquire(inner_ctx);
        rc = fn(other_job);
        aio_context_release(inner_ctx);
        if (rc) {
            break;
        }
    }

    /*
     * Note that job->aio_context might have been changed by calling fn, so we
     * can't use a local variable to cache it.
     */
    aio_context_acquire(job->aio_context);
    job_unref(job);
    return rc;
}

bool job_is_internal(Job *job)
{
    return (job->id == NULL);
}

static void job_state_transition(Job *job, JobStatus s1)
{
    JobStatus s0 = job->status;
    assert(s1 >= 0 && s1 < JOB_STATUS__MAX);
    trace_job_state_transition(job, job->ret,
                               JobSTT[s0][s1] ? "allowed" : "disallowed",
                               JobStatus_str(s0), JobStatus_str(s1));
    assert(JobSTT[s0][s1]);
    job->status = s1;

    if (!job_is_internal(job) && s1 != s0) {
        qapi_event_send_job_status_change(job->id, job->status);
    }
}

int job_apply_verb(Job *job, JobVerb verb, Error **errp)
{
    JobStatus s0 = job->status;
    assert(verb >= 0 && verb < JOB_VERB__MAX);
    trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb),
                         JobVerbTable[verb][s0] ? "allowed" : "prohibited");
    if (JobVerbTable[verb][s0]) {
        return 0;
    }
    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
               job->id, JobStatus_str(s0), JobVerb_str(verb));
    return -EPERM;
}

JobType job_type(const Job *job)
{
    return job->driver->job_type;
}

const char *job_type_str(const Job *job)
{
    return JobType_str(job_type(job));
}

bool job_is_cancelled(Job *job)
{
    return job->cancelled;
}

bool job_is_ready(Job *job)
{
    switch (job->status) {
    case JOB_STATUS_UNDEFINED:
    case JOB_STATUS_CREATED:
    case JOB_STATUS_RUNNING:
    case JOB_STATUS_PAUSED:
    case JOB_STATUS_WAITING:
    case JOB_STATUS_PENDING:
    case JOB_STATUS_ABORTING:
    case JOB_STATUS_CONCLUDED:
    case JOB_STATUS_NULL:
        return false;
    case JOB_STATUS_READY:
    case JOB_STATUS_STANDBY:
        return true;
    default:
        g_assert_not_reached();
    }
    return false;
}

bool job_is_completed(Job *job)
{
    switch (job->status) {
    case JOB_STATUS_UNDEFINED:
    case JOB_STATUS_CREATED:
    case JOB_STATUS_RUNNING:
    case JOB_STATUS_PAUSED:
    case JOB_STATUS_READY:
    case JOB_STATUS_STANDBY:
        return false;
    case JOB_STATUS_WAITING:
    case JOB_STATUS_PENDING:
    case JOB_STATUS_ABORTING:
    case JOB_STATUS_CONCLUDED:
    case JOB_STATUS_NULL:
        return true;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool job_started(Job *job)
{
    return job->co;
}

static bool job_should_pause(Job *job)
{
    return job->pause_count > 0;
}

Job *job_next(Job *job)
{
    if (!job) {
        return QLIST_FIRST(&jobs);
    }
    return QLIST_NEXT(job, job_list);
}

Job *job_get(const char *id)
{
    Job *job;

    QLIST_FOREACH(job, &jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

static void job_sleep_timer_cb(void *opaque)
{
    Job *job = opaque;

    job_enter(job);
}

void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
                 AioContext *ctx, int flags, BlockCompletionFunc *cb,
                 void *opaque, Error **errp)
{
    Job *job;

    if (job_id) {
        if (flags & JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal job");
            return NULL;
        }
        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }
        if (job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    } else if (!(flags & JOB_INTERNAL)) {
        error_setg(errp, "An explicit job ID is required");
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->refcnt        = 1;
    job->aio_context   = ctx;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->auto_finalize = !(flags & JOB_MANUAL_FINALIZE);
    job->auto_dismiss  = !(flags & JOB_MANUAL_DISMISS);
    job->cb            = cb;
    job->opaque        = opaque;

    notifier_list_init(&job->on_finalize_cancelled);
    notifier_list_init(&job->on_finalize_completed);
    notifier_list_init(&job->on_pending);
    notifier_list_init(&job->on_ready);

    job_state_transition(job, JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   job_sleep_timer_cb, job);

    QLIST_INSERT_HEAD(&jobs, job, job_list);

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = job_txn_new();
        job_txn_add_job(txn, job);
        job_txn_unref(txn);
    } else {
        job_txn_add_job(txn, job);
    }

    return job;
}
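
/*
 * A minimal, illustrative sketch of a caller of job_create()/job_start(),
 * kept under #if 0. It assumes the JobDriver and JobCreateFlags declarations
 * from "qemu/job.h" and an arbitrary JobType from the QAPI schema; the driver
 * and function names below are hypothetical placeholders, not part of this
 * file's API. Real users are the block job drivers.
 */
#if 0
static int coroutine_fn example_job_run(Job *job, Error **errp)
{
    /* Do the long-running work here, yielding regularly (see job_sleep_ns). */
    return 0;
}

static const JobDriver example_job_driver = {
    .instance_size = sizeof(Job),
    .job_type      = JOB_TYPE_BACKUP, /* hypothetical choice of QAPI job type */
    .run           = example_job_run,
};

static void example_start_job(AioContext *ctx, Error **errp)
{
    Job *job = job_create("example0", &example_job_driver, NULL, ctx,
                          JOB_DEFAULT, NULL, NULL, errp);
    if (job) {
        job_start(job);
    }
}
#endif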

void job_ref(Job *job)
{
    ++job->refcnt;
}

void job_unref(Job *job)
{
    if (--job->refcnt == 0) {
        assert(job->status == JOB_STATUS_NULL);
        assert(!timer_pending(&job->sleep_timer));
        assert(!job->txn);

        if (job->driver->free) {
            job->driver->free(job);
        }

        QLIST_REMOVE(job, job_list);

        error_free(job->err);
        g_free(job->id);
        g_free(job);
    }
}

void job_progress_update(Job *job, uint64_t done)
{
    progress_work_done(&job->progress, done);
}

void job_progress_set_remaining(Job *job, uint64_t remaining)
{
    progress_set_remaining(&job->progress, remaining);
}

void job_progress_increase_remaining(Job *job, uint64_t delta)
{
    progress_increase_remaining(&job->progress, delta);
}

void job_event_cancelled(Job *job)
{
    notifier_list_notify(&job->on_finalize_cancelled, job);
}

void job_event_completed(Job *job)
{
    notifier_list_notify(&job->on_finalize_completed, job);
}

static void job_event_pending(Job *job)
{
    notifier_list_notify(&job->on_pending, job);
}

static void job_event_ready(Job *job)
{
    notifier_list_notify(&job->on_ready, job);
}

static void job_event_idle(Job *job)
{
    notifier_list_notify(&job->on_idle, job);
}

void job_enter_cond(Job *job, bool(*fn)(Job *job))
{
    if (!job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    job_lock();
    if (job->busy) {
        job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    job_unlock();
    aio_co_enter(job->aio_context, job->co);
}

void job_enter(Job *job)
{
    job_enter_cond(job, NULL);
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with job_enter() before the timer has expired
 * is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
 * called explicitly. */
static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
    job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    job_event_idle(job);
    job_unlock();
    qemu_coroutine_yield();

    /* Set by job_enter_cond() before re-entering the coroutine. */
    assert(job->busy);
}

void coroutine_fn job_pause_point(Job *job)
{
    assert(job && job_started(job));

    if (!job_should_pause(job)) {
        return;
    }
    if (job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (job_should_pause(job) && !job_is_cancelled(job)) {
        JobStatus status = job->status;
        job_state_transition(job, status == JOB_STATUS_READY
                                  ? JOB_STATUS_STANDBY
                                  : JOB_STATUS_PAUSED);
        job->paused = true;
        job_do_yield(job, -1);
        job->paused = false;
        job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void job_yield(Job *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, -1);
    }

    job_pause_point(job);
}

void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    job_pause_point(job);
}
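
/*
 * A minimal, illustrative sketch of the cooperative pattern a driver's .run
 * callback is expected to follow, kept under #if 0 and built only from the
 * helpers above. The example_* work items are hypothetical placeholders.
 */
#if 0
static int coroutine_fn example_run_loop(Job *job, Error **errp)
{
    job_progress_set_remaining(job, example_total_work);

    while (!job_is_cancelled(job) && example_work_left()) {
        /* Honour pause requests and throttle ourselves between chunks. */
        job_pause_point(job);
        example_do_one_chunk();
        job_progress_update(job, 1);
        job_sleep_ns(job, example_delay_ns);
    }

    /* On cancellation, job_update_rc() turns ret == 0 into -ECANCELED. */
    return 0;
}
#endif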

/* Assumes the block_job_mutex is held */
static bool job_timer_not_pending(Job *job)
{
    return !timer_pending(&job->sleep_timer);
}

void job_pause(Job *job)
{
    job->pause_count++;
    if (!job->paused) {
        job_enter(job);
    }
}

void job_resume(Job *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }

    /* kick only if no timer is pending */
    job_enter_cond(job, job_timer_not_pending);
}

void job_user_pause(Job *job, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    job_pause(job);
}

bool job_user_paused(Job *job)
{
    return job->user_paused;
}

void job_user_resume(Job *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (job_apply_verb(job, JOB_VERB_RESUME, errp)) {
        return;
    }
    if (job->driver->user_resume) {
        job->driver->user_resume(job);
    }
    job->user_paused = false;
    job_resume(job);
}

static void job_do_dismiss(Job *job)
{
    assert(job);
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;

    job_txn_del_job(job);

    job_state_transition(job, JOB_STATUS_NULL);
    job_unref(job);
}

void job_dismiss(Job **jobptr, Error **errp)
{
    Job *job = *jobptr;
    /* similarly to _complete, this is QMP-interface only. */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) {
        return;
    }

    job_do_dismiss(job);
    *jobptr = NULL;
}

void job_early_fail(Job *job)
{
    assert(job->status == JOB_STATUS_CREATED);
    job_do_dismiss(job);
}
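
/*
 * Lifecycle note: job_conclude() and the finalize machinery below implement
 * the JOB_MANUAL_* flags. A job created with JOB_MANUAL_FINALIZE stops in
 * PENDING once its transaction completes successfully and waits for
 * job_finalize(); a job created with JOB_MANUAL_DISMISS remains in CONCLUDED
 * until job_dismiss() moves it to NULL and drops its reference.
 */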
static void job_conclude(Job *job)
{
    job_state_transition(job, JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !job_started(job)) {
        job_do_dismiss(job);
    }
}

static void job_update_rc(Job *job)
{
    if (!job->ret && job_is_cancelled(job)) {
        job->ret = -ECANCELED;
    }
    if (job->ret) {
        if (!job->err) {
            error_setg(&job->err, "%s", strerror(-job->ret));
        }
        job_state_transition(job, JOB_STATUS_ABORTING);
    }
}

static void job_commit(Job *job)
{
    assert(!job->ret);
    if (job->driver->commit) {
        job->driver->commit(job);
    }
}

static void job_abort(Job *job)
{
    assert(job->ret);
    if (job->driver->abort) {
        job->driver->abort(job);
    }
}

static void job_clean(Job *job)
{
    if (job->driver->clean) {
        job->driver->clean(job);
    }
}

static int job_finalize_single(Job *job)
{
    assert(job_is_completed(job));

    /* Ensure abort is called for late-transactional failures */
    job_update_rc(job);

    if (!job->ret) {
        job_commit(job);
    } else {
        job_abort(job);
    }
    job_clean(job);

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (job_started(job)) {
        if (job_is_cancelled(job)) {
            job_event_cancelled(job);
        } else {
            job_event_completed(job);
        }
    }

    job_txn_del_job(job);
    job_conclude(job);
    return 0;
}

static void job_cancel_async(Job *job, bool force)
{
    if (job->user_paused) {
        /* Do not call job_enter here, the caller will handle it. */
        if (job->driver->user_resume) {
            job->driver->user_resume(job);
        }
        job->user_paused = false;
        assert(job->pause_count > 0);
        job->pause_count--;
    }
    job->cancelled = true;
    /* To prevent 'force == false' overriding a previous 'force == true' */
    job->force_cancel |= force;
}

static void job_completed_txn_abort(Job *job)
{
    AioContext *outer_ctx = job->aio_context;
    AioContext *ctx;
    JobTxn *txn = job->txn;
    Job *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    job_txn_ref(txn);

    /* We can only hold the single job's AioContext lock while calling
     * job_finalize_single() because the finalization callbacks can involve
     * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */
    aio_context_release(outer_ctx);

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            ctx = other_job->aio_context;
            aio_context_acquire(ctx);
            job_cancel_async(other_job, false);
            aio_context_release(ctx);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = other_job->aio_context;
        aio_context_acquire(ctx);
        if (!job_is_completed(other_job)) {
            assert(job_is_cancelled(other_job));
            job_finish_sync(other_job, NULL, NULL);
        }
        job_finalize_single(other_job);
        aio_context_release(ctx);
    }

    aio_context_acquire(outer_ctx);

    job_txn_unref(txn);
}

static int job_prepare(Job *job)
{
    if (job->ret == 0 && job->driver->prepare) {
        job->ret = job->driver->prepare(job);
        job_update_rc(job);
    }
    return job->ret;
}

static int job_needs_finalize(Job *job)
{
    return !job->auto_finalize;
}

static void job_do_finalize(Job *job)
{
    int rc;
    assert(job && job->txn);

    /* prepare the transaction to complete */
    rc = job_txn_apply(job, job_prepare);
    if (rc) {
        job_completed_txn_abort(job);
    } else {
        job_txn_apply(job, job_finalize_single);
    }
}

void job_finalize(Job *job, Error **errp)
{
    assert(job && job->id);
    if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) {
        return;
    }
    job_do_finalize(job);
}

static int job_transition_to_pending(Job *job)
{
    job_state_transition(job, JOB_STATUS_PENDING);
    if (!job->auto_finalize) {
        job_event_pending(job);
    }
    return 0;
}

void job_transition_to_ready(Job *job)
{
    job_state_transition(job, JOB_STATUS_READY);
    job_event_ready(job);
}

static void job_completed_txn_success(Job *job)
{
    JobTxn *txn = job->txn;
    Job *other_job;

    job_state_transition(job, JOB_STATUS_WAITING);

    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!job_is_completed(other_job)) {
            return;
        }
        assert(other_job->ret == 0);
    }

    job_txn_apply(job, job_transition_to_pending);

    /* If no jobs need manual finalization, automatically do so */
    if (job_txn_apply(job, job_needs_finalize) == 0) {
        job_do_finalize(job);
    }
}

static void job_completed(Job *job)
{
    assert(job && job->txn && !job_is_completed(job));

    job_update_rc(job);
    trace_job_completed(job, job->ret);
    if (job->ret) {
        job_completed_txn_abort(job);
    } else {
        job_completed_txn_success(job);
    }
}

/** Useful only as a type shim for aio_bh_schedule_oneshot. */
static void job_exit(void *opaque)
{
    Job *job = (Job *)opaque;
    AioContext *ctx;

    job_ref(job);
    aio_context_acquire(job->aio_context);

    /* This is a lie, we're not quiescent, but still doing the completion
     * callbacks. However, completion callbacks tend to involve operations that
     * drain block nodes, and if .drained_poll still returned true, we would
     * deadlock. */
    job->busy = false;
    job_event_idle(job);

    job_completed(job);

    /*
     * Note that calling job_completed can move the job to a different
     * aio_context, so we cannot cache from above. job_txn_apply takes care of
     * acquiring the new lock, and we ref/unref to avoid job_completed freeing
     * the job underneath us.
     */
    ctx = job->aio_context;
    job_unref(job);
    aio_context_release(ctx);
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn job_co_entry(void *opaque)
{
    Job *job = opaque;

    assert(job && job->driver && job->driver->run);
    job_pause_point(job);
    job->ret = job->driver->run(job, &job->err);
    job->deferred_to_main_loop = true;
    job->busy = true;
    aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
}

void job_start(Job *job)
{
    assert(job && !job_started(job) && job->paused &&
           job->driver && job->driver->run);
    job->co = qemu_coroutine_create(job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job_state_transition(job, JOB_STATUS_RUNNING);
    aio_co_enter(job->aio_context, job->co);
}

void job_cancel(Job *job, bool force)
{
    if (job->status == JOB_STATUS_CONCLUDED) {
        job_do_dismiss(job);
        return;
    }
    job_cancel_async(job, force);
    if (!job_started(job)) {
        job_completed(job);
    } else if (job->deferred_to_main_loop) {
        job_completed_txn_abort(job);
    } else {
        job_enter(job);
    }
}

void job_user_cancel(Job *job, bool force, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) {
        return;
    }
    job_cancel(job, force);
}

/* A wrapper around job_cancel() taking an Error ** parameter so it may be
 * used with job_finish_sync() without the need for (rather nasty) function
 * pointer casts there. */
static void job_cancel_err(Job *job, Error **errp)
{
    job_cancel(job, false);
}

int job_cancel_sync(Job *job)
{
    return job_finish_sync(job, &job_cancel_err, NULL);
}

void job_cancel_sync_all(void)
{
    Job *job;
    AioContext *aio_context;

    while ((job = job_next(NULL))) {
        aio_context = job->aio_context;
        aio_context_acquire(aio_context);
        job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int job_complete_sync(Job *job, Error **errp)
{
    return job_finish_sync(job, job_complete, errp);
}

void job_complete(Job *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job_is_cancelled(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
    Error *local_err = NULL;
    int ret;

    job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        job_unref(job);
        return -EBUSY;
    }

    AIO_WAIT_WHILE(job->aio_context,
                   (job_enter(job), !job_is_completed(job)));

    ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
    job_unref(job);
    return ret;
}
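
/*
 * A minimal, illustrative sketch of driving a job to completion synchronously
 * from outside the job, kept under #if 0. It mirrors the locking pattern of
 * job_cancel_sync_all() above; the caller name is a hypothetical placeholder.
 */
#if 0
static int example_finish_job(Job *job, bool cancel, Error **errp)
{
    AioContext *ctx = job->aio_context;
    int ret;

    aio_context_acquire(ctx);
    if (cancel) {
        ret = job_cancel_sync(job);
    } else {
        ret = job_complete_sync(job, errp);
    }
    aio_context_release(ctx);

    return ret;
}
#endif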