/*
 * Background jobs (long-running operations)
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012, 2018 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"
#include "qemu/job.h"
#include "qemu/id.h"
#include "qemu/main-loop.h"
#include "trace-root.h"
#include "qapi/qapi-events-job.h"

static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);

/* Job State Transition Table */
bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
    /* R: */ [JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
    /* P: */ [JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
    /* Y: */ [JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
    /* S: */ [JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* W: */ [JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
    /* D: */ [JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* X: */ [JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};

bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    [JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
    [JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    [JOB_VERB_FINALIZE]             = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
    [JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
};
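
/*
 * Reading the tables above (an illustrative note, not part of the original
 * interface documentation): JobSTT[s0][s1] is non-zero iff a transition from
 * status s0 to status s1 is allowed, and JobVerbTable[verb][s] is non-zero
 * iff @verb may be applied to a job currently in status s.  A successful job
 * that has a READY phase typically walks
 *
 *   CREATED -> RUNNING -> READY -> WAITING -> PENDING -> CONCLUDED -> NULL
 *
 * while a job that fails or is cancelled detours through ABORTING before it
 * reaches CONCLUDED.
 */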

/* Transactional group of jobs */
struct JobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, Job) jobs;

    /* Reference count */
    int refcnt;
};

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to job_do_yield and
 * job_enter. */
static QemuMutex job_mutex;

static void job_lock(void)
{
    qemu_mutex_lock(&job_mutex);
}

static void job_unlock(void)
{
    qemu_mutex_unlock(&job_mutex);
}

static void __attribute__((__constructor__)) job_init(void)
{
    qemu_mutex_init(&job_mutex);
}

JobTxn *job_txn_new(void)
{
    JobTxn *txn = g_new0(JobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void job_txn_ref(JobTxn *txn)
{
    txn->refcnt++;
}

void job_txn_unref(JobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void job_txn_add_job(JobTxn *txn, Job *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    job_txn_ref(txn);
}

static void job_txn_del_job(Job *job)
{
    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        job_txn_unref(job->txn);
        job->txn = NULL;
    }
}

static int job_txn_apply(JobTxn *txn, int fn(Job *), bool lock)
{
    AioContext *ctx;
    Job *job, *next;
    int rc = 0;

    QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
        if (lock) {
            ctx = job->aio_context;
            aio_context_acquire(ctx);
        }
        rc = fn(job);
        if (lock) {
            aio_context_release(ctx);
        }
        if (rc) {
            break;
        }
    }
    return rc;
}

bool job_is_internal(Job *job)
{
    return (job->id == NULL);
}

static void job_state_transition(Job *job, JobStatus s1)
{
    JobStatus s0 = job->status;
    assert(s1 >= 0 && s1 < JOB_STATUS__MAX);
    trace_job_state_transition(job, job->ret,
                               JobSTT[s0][s1] ? "allowed" : "disallowed",
                               JobStatus_str(s0), JobStatus_str(s1));
    assert(JobSTT[s0][s1]);
    job->status = s1;

    if (!job_is_internal(job) && s1 != s0) {
        qapi_event_send_job_status_change(job->id, job->status, &error_abort);
    }
}

int job_apply_verb(Job *job, JobVerb verb, Error **errp)
{
    JobStatus s0 = job->status;
    assert(verb >= 0 && verb < JOB_VERB__MAX);
    trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb),
                         JobVerbTable[verb][s0] ? "allowed" : "prohibited");
    if (JobVerbTable[verb][s0]) {
        return 0;
    }
    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
               job->id, JobStatus_str(s0), JobVerb_str(verb));
    return -EPERM;
}

JobType job_type(const Job *job)
{
    return job->driver->job_type;
}

const char *job_type_str(const Job *job)
{
    return JobType_str(job_type(job));
}

bool job_is_cancelled(Job *job)
{
    return job->cancelled;
}

bool job_is_ready(Job *job)
{
    switch (job->status) {
    case JOB_STATUS_UNDEFINED:
    case JOB_STATUS_CREATED:
    case JOB_STATUS_RUNNING:
    case JOB_STATUS_PAUSED:
    case JOB_STATUS_WAITING:
    case JOB_STATUS_PENDING:
    case JOB_STATUS_ABORTING:
    case JOB_STATUS_CONCLUDED:
    case JOB_STATUS_NULL:
        return false;
    case JOB_STATUS_READY:
    case JOB_STATUS_STANDBY:
        return true;
    default:
        g_assert_not_reached();
    }
    return false;
}

bool job_is_completed(Job *job)
{
    switch (job->status) {
    case JOB_STATUS_UNDEFINED:
    case JOB_STATUS_CREATED:
    case JOB_STATUS_RUNNING:
    case JOB_STATUS_PAUSED:
    case JOB_STATUS_READY:
    case JOB_STATUS_STANDBY:
        return false;
    case JOB_STATUS_WAITING:
    case JOB_STATUS_PENDING:
    case JOB_STATUS_ABORTING:
    case JOB_STATUS_CONCLUDED:
    case JOB_STATUS_NULL:
        return true;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool job_started(Job *job)
{
    return job->co;
}

static bool job_should_pause(Job *job)
{
    return job->pause_count > 0;
}

Job *job_next(Job *job)
{
    if (!job) {
        return QLIST_FIRST(&jobs);
    }
    return QLIST_NEXT(job, job_list);
}

Job *job_get(const char *id)
{
    Job *job;

    QLIST_FOREACH(job, &jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

static void job_sleep_timer_cb(void *opaque)
{
    Job *job = opaque;

    job_enter(job);
}

void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
                 AioContext *ctx, int flags, BlockCompletionFunc *cb,
                 void *opaque, Error **errp)
{
    Job *job;

    if (job_id) {
        if (flags & JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal job");
            return NULL;
        }
        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }
        if (job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    } else if (!(flags & JOB_INTERNAL)) {
        error_setg(errp, "An explicit job ID is required");
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver = driver;
    job->id = g_strdup(job_id);
    job->refcnt = 1;
    job->aio_context = ctx;
    job->busy = false;
    job->paused = true;
    job->pause_count = 1;
    job->auto_finalize = !(flags & JOB_MANUAL_FINALIZE);
    job->auto_dismiss = !(flags & JOB_MANUAL_DISMISS);
    job->cb = cb;
    job->opaque = opaque;

    notifier_list_init(&job->on_finalize_cancelled);
    notifier_list_init(&job->on_finalize_completed);
    notifier_list_init(&job->on_pending);
    notifier_list_init(&job->on_ready);

    job_state_transition(job, JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   job_sleep_timer_cb, job);

    QLIST_INSERT_HEAD(&jobs, job, job_list);

    /* Single jobs are modeled as single-job transactions for the sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = job_txn_new();
        job_txn_add_job(txn, job);
        job_txn_unref(txn);
    } else {
        job_txn_add_job(txn, job);
    }

    return job;
}

void job_ref(Job *job)
{
    ++job->refcnt;
}

void job_unref(Job *job)
{
    if (--job->refcnt == 0) {
        assert(job->status == JOB_STATUS_NULL);
        assert(!timer_pending(&job->sleep_timer));
        assert(!job->txn);

        if (job->driver->free) {
            job->driver->free(job);
        }

        QLIST_REMOVE(job, job_list);

        g_free(job->id);
        g_free(job);
    }
}

void job_progress_update(Job *job, uint64_t done)
{
    job->progress_current += done;
}

void job_progress_set_remaining(Job *job, uint64_t remaining)
{
    job->progress_total = job->progress_current + remaining;
}

void job_event_cancelled(Job *job)
{
    notifier_list_notify(&job->on_finalize_cancelled, job);
}

void job_event_completed(Job *job)
{
    notifier_list_notify(&job->on_finalize_completed, job);
}

static void job_event_pending(Job *job)
{
    notifier_list_notify(&job->on_pending, job);
}

static void job_event_ready(Job *job)
{
    notifier_list_notify(&job->on_ready, job);
}

void job_enter_cond(Job *job, bool(*fn)(Job *job))
{
    if (!job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    job_lock();
    if (job->busy) {
        job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    job_unlock();
    aio_co_wake(job->co);
}

void job_enter(Job *job)
{
    job_enter_cond(job, NULL);
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with job_enter() before the timer has expired
 * is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
 * called explicitly. */
static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
    job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    job_unlock();
    qemu_coroutine_yield();

    /* Set by job_enter_cond() before re-entering the coroutine. */
    assert(job->busy);
}
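
/*
 * A rough sketch of how a job's coroutine uses the helpers that follow
 * (illustrative; the work loop and delay are made up):
 *
 *     while (more_work && !job_is_cancelled(job)) {
 *         ... do one unit of work ...
 *         job_sleep_ns(job, delay_ns);   // yield; also services pause requests
 *     }
 *
 * job_sleep_ns() and job_yield() return immediately if the job has been
 * cancelled, and both finish with a job_pause_point() so that pause requests
 * are honoured between units of work.
 */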

void coroutine_fn job_pause_point(Job *job)
{
    assert(job && job_started(job));

    if (!job_should_pause(job)) {
        return;
    }
    if (job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (job_should_pause(job) && !job_is_cancelled(job)) {
        JobStatus status = job->status;
        job_state_transition(job, status == JOB_STATUS_READY
                                  ? JOB_STATUS_STANDBY
                                  : JOB_STATUS_PAUSED);
        job->paused = true;
        job_do_yield(job, -1);
        job->paused = false;
        job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void job_yield(Job *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, -1);
    }

    job_pause_point(job);
}

void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    job_pause_point(job);
}

void job_drain(Job *job)
{
    /* If job is !busy this kicks it into the next pause point. */
    job_enter(job);

    if (job->driver->drain) {
        job->driver->drain(job);
    }
}


/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn job_co_entry(void *opaque)
{
    Job *job = opaque;

    assert(job && job->driver && job->driver->start);
    job_pause_point(job);
    job->driver->start(job);
}


void job_start(Job *job)
{
    assert(job && !job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job_state_transition(job, JOB_STATUS_RUNNING);
    aio_co_enter(job->aio_context, job->co);
}

/* Assumes the job_mutex is held */
static bool job_timer_not_pending(Job *job)
{
    return !timer_pending(&job->sleep_timer);
}

void job_pause(Job *job)
{
    job->pause_count++;
}

void job_resume(Job *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }

    /* kick only if no timer is pending */
    job_enter_cond(job, job_timer_not_pending);
}

void job_user_pause(Job *job, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    job_pause(job);
}

bool job_user_paused(Job *job)
{
    return job->user_paused;
}

void job_user_resume(Job *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (job_apply_verb(job, JOB_VERB_RESUME, errp)) {
        return;
    }
    if (job->driver->user_resume) {
        job->driver->user_resume(job);
    }
    job->user_paused = false;
    job_resume(job);
}
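
/*
 * Note on the two pause mechanisms above (added for clarity): job_pause() and
 * job_resume() maintain a nesting counter, and a job whose pause_count is
 * non-zero parks itself at its next job_pause_point(); job_create() starts
 * every job with pause_count == 1, which job_start() drops.
 * job_user_pause() and job_user_resume() sit on top of that counter and track
 * the externally requested pause separately in user_paused, so an internal
 * pause is not reported or resumed as a user pause.
 */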

static void job_do_dismiss(Job *job)
{
    assert(job);
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;

    job_txn_del_job(job);

    job_state_transition(job, JOB_STATUS_NULL);
    job_unref(job);
}

void job_dismiss(Job **jobptr, Error **errp)
{
    Job *job = *jobptr;
    /* similarly to _complete, this is QMP-interface only. */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) {
        return;
    }

    job_do_dismiss(job);
    *jobptr = NULL;
}

void job_early_fail(Job *job)
{
    assert(job->status == JOB_STATUS_CREATED);
    job_do_dismiss(job);
}

static void job_conclude(Job *job)
{
    job_state_transition(job, JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !job_started(job)) {
        job_do_dismiss(job);
    }
}

static void job_update_rc(Job *job)
{
    if (!job->ret && job_is_cancelled(job)) {
        job->ret = -ECANCELED;
    }
    if (job->ret) {
        job_state_transition(job, JOB_STATUS_ABORTING);
    }
}

static void job_commit(Job *job)
{
    assert(!job->ret);
    if (job->driver->commit) {
        job->driver->commit(job);
    }
}

static void job_abort(Job *job)
{
    assert(job->ret);
    if (job->driver->abort) {
        job->driver->abort(job);
    }
}

static void job_clean(Job *job)
{
    if (job->driver->clean) {
        job->driver->clean(job);
    }
}

static int job_finalize_single(Job *job)
{
    assert(job_is_completed(job));

    /* Ensure abort is called for late-transactional failures */
    job_update_rc(job);

    if (!job->ret) {
        job_commit(job);
    } else {
        job_abort(job);
    }
    job_clean(job);

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (job_started(job)) {
        if (job_is_cancelled(job)) {
            job_event_cancelled(job);
        } else {
            job_event_completed(job);
        }
    }

    job_txn_del_job(job);
    job_conclude(job);
    return 0;
}

static void job_cancel_async(Job *job, bool force)
{
    if (job->user_paused) {
        /* Do not call job_enter here, the caller will handle it. */
        job->user_paused = false;
        if (job->driver->user_resume) {
            job->driver->user_resume(job);
        }
        assert(job->pause_count > 0);
        job->pause_count--;
    }
    job->cancelled = true;
    /* To prevent 'force == false' overriding a previous 'force == true' */
    job->force_cancel |= force;
}
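
/*
 * Completion and finalization in short (a descriptive summary of the code
 * below): job_completed() records the job's return value.  On failure the
 * whole transaction is aborted and the sibling jobs are cancelled; on success
 * the job waits in WAITING until every job in its transaction has completed,
 * after which all of them move to PENDING.  Unless a job in the transaction
 * requested manual finalization, job_do_finalize() then runs ->prepare for
 * each job and commits (or aborts) and cleans them up via
 * job_finalize_single().
 */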

static void job_completed_txn_abort(Job *job)
{
    AioContext *ctx;
    JobTxn *txn = job->txn;
    Job *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = other_job->aio_context;
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            job_cancel_async(other_job, false);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = other_job->aio_context;
        if (!job_is_completed(other_job)) {
            assert(job_is_cancelled(other_job));
            job_finish_sync(other_job, NULL, NULL);
        }
        job_finalize_single(other_job);
        aio_context_release(ctx);
    }

    job_txn_unref(txn);
}

static int job_prepare(Job *job)
{
    if (job->ret == 0 && job->driver->prepare) {
        job->ret = job->driver->prepare(job);
    }
    return job->ret;
}

static int job_needs_finalize(Job *job)
{
    return !job->auto_finalize;
}

static void job_do_finalize(Job *job)
{
    int rc;
    assert(job && job->txn);

    /* prepare the transaction to complete */
    rc = job_txn_apply(job->txn, job_prepare, true);
    if (rc) {
        job_completed_txn_abort(job);
    } else {
        job_txn_apply(job->txn, job_finalize_single, true);
    }
}

void job_finalize(Job *job, Error **errp)
{
    assert(job && job->id);
    if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) {
        return;
    }
    job_do_finalize(job);
}

static int job_transition_to_pending(Job *job)
{
    job_state_transition(job, JOB_STATUS_PENDING);
    if (!job->auto_finalize) {
        job_event_pending(job);
    }
    return 0;
}

void job_transition_to_ready(Job *job)
{
    job_state_transition(job, JOB_STATUS_READY);
    job_event_ready(job);
}

static void job_completed_txn_success(Job *job)
{
    JobTxn *txn = job->txn;
    Job *other_job;

    job_state_transition(job, JOB_STATUS_WAITING);

    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!job_is_completed(other_job)) {
            return;
        }
        assert(other_job->ret == 0);
    }

    job_txn_apply(txn, job_transition_to_pending, false);

    /* If no jobs need manual finalization, automatically do so */
    if (job_txn_apply(txn, job_needs_finalize, false) == 0) {
        job_do_finalize(job);
    }
}

void job_completed(Job *job, int ret)
{
    assert(job && job->txn && !job_is_completed(job));
    job->ret = ret;
    job_update_rc(job);
    trace_job_completed(job, ret, job->ret);
    if (job->ret) {
        job_completed_txn_abort(job);
    } else {
        job_completed_txn_success(job);
    }
}
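
/*
 * How completion is usually reported (a sketch; example_exit() and its return
 * code handling are hypothetical): a driver's ->start coroutine typically
 * does not call job_completed() directly, but defers to the main loop first
 * via job_defer_to_main_loop(), defined further below:
 *
 *     static void example_exit(Job *job, void *opaque)
 *     {
 *         job_completed(job, example_compute_ret(job));
 *     }
 *
 *     ... and at the end of ->start():
 *     job_defer_to_main_loop(job, example_exit, NULL);
 */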

void job_cancel(Job *job, bool force)
{
    if (job->status == JOB_STATUS_CONCLUDED) {
        job_do_dismiss(job);
        return;
    }
    job_cancel_async(job, force);
    if (!job_started(job)) {
        job_completed(job, -ECANCELED);
    } else if (job->deferred_to_main_loop) {
        job_completed_txn_abort(job);
    } else {
        job_enter(job);
    }
}

void job_user_cancel(Job *job, bool force, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) {
        return;
    }
    job_cancel(job, force);
}

/* A wrapper around job_cancel() taking an Error ** parameter so it may be
 * used with job_finish_sync() without the need for (rather nasty) function
 * pointer casts there. */
static void job_cancel_err(Job *job, Error **errp)
{
    job_cancel(job, false);
}

int job_cancel_sync(Job *job)
{
    return job_finish_sync(job, &job_cancel_err, NULL);
}

void job_cancel_sync_all(void)
{
    Job *job;
    AioContext *aio_context;

    while ((job = job_next(NULL))) {
        aio_context = job->aio_context;
        aio_context_acquire(aio_context);
        job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int job_complete_sync(Job *job, Error **errp)
{
    return job_finish_sync(job, job_complete, errp);
}

void job_complete(Job *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job_is_cancelled(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}


typedef struct {
    Job *job;
    JobDeferToMainLoopFn *fn;
    void *opaque;
} JobDeferToMainLoopData;

static void job_defer_to_main_loop_bh(void *opaque)
{
    JobDeferToMainLoopData *data = opaque;
    Job *job = data->job;
    AioContext *aio_context = job->aio_context;

    aio_context_acquire(aio_context);
    data->fn(data->job, data->opaque);
    aio_context_release(aio_context);

    g_free(data);
}

void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque)
{
    JobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            job_defer_to_main_loop_bh, data);
}

int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
    Error *local_err = NULL;
    int ret;

    job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        job_unref(job);
        return -EBUSY;
    }
    /* job_drain calls job_enter, and it should be enough to induce progress
     * until the job completes or moves to the main thread. */
    while (!job->deferred_to_main_loop && !job_is_completed(job)) {
        job_drain(job);
    }
    while (!job_is_completed(job)) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
    job_unref(job);
    return ret;
}