1 /* 2 * Background jobs (long-running operations) 3 * 4 * Copyright (c) 2011 IBM Corp. 5 * Copyright (c) 2012, 2018 Red Hat, Inc. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a copy 8 * of this software and associated documentation files (the "Software"), to deal 9 * in the Software without restriction, including without limitation the rights 10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 * copies of the Software, and to permit persons to whom the Software is 12 * furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 * THE SOFTWARE. 24 */ 25 26 #include "qemu/osdep.h" 27 #include "qapi/error.h" 28 #include "qemu/job.h" 29 #include "qemu/id.h" 30 #include "qemu/main-loop.h" 31 #include "block/aio-wait.h" 32 #include "trace/trace-root.h" 33 #include "qapi/qapi-events-job.h" 34 35 static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs); 36 37 /* Job State Transition Table */ 38 bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = { 39 /* U, C, R, P, Y, S, W, D, X, E, N */ 40 /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 41 /* C: */ [JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1}, 42 /* R: */ [JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0}, 43 /* P: */ [JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 44 /* Y: */ [JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0}, 45 /* S: */ [JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, 46 /* W: */ [JOB_STATUS_WAITING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0}, 47 /* D: */ [JOB_STATUS_PENDING] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, 48 /* X: */ [JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, 49 /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, 50 /* N: */ [JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 51 }; 52 53 bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = { 54 /* U, C, R, P, Y, S, W, D, X, E, N */ 55 [JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0}, 56 [JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, 57 [JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, 58 [JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, 59 [JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0}, 60 [JOB_VERB_FINALIZE] = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, 61 [JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, 62 }; 63 64 /* Transactional group of jobs */ 65 struct JobTxn { 66 67 /* Is this txn being cancelled? */ 68 bool aborting; 69 70 /* List of jobs */ 71 QLIST_HEAD(, Job) jobs; 72 73 /* Reference count */ 74 int refcnt; 75 }; 76 77 /* Right now, this mutex is only needed to synchronize accesses to job->busy 78 * and job->sleep_timer, such as concurrent calls to job_do_yield and 79 * job_enter. */ 80 static QemuMutex job_mutex; 81 82 static void job_lock(void) 83 { 84 qemu_mutex_lock(&job_mutex); 85 } 86 87 static void job_unlock(void) 88 { 89 qemu_mutex_unlock(&job_mutex); 90 } 91 92 static void __attribute__((__constructor__)) job_init(void) 93 { 94 qemu_mutex_init(&job_mutex); 95 } 96 97 JobTxn *job_txn_new(void) 98 { 99 JobTxn *txn = g_new0(JobTxn, 1); 100 QLIST_INIT(&txn->jobs); 101 txn->refcnt = 1; 102 return txn; 103 } 104 105 static void job_txn_ref(JobTxn *txn) 106 { 107 txn->refcnt++; 108 } 109 110 void job_txn_unref(JobTxn *txn) 111 { 112 if (txn && --txn->refcnt == 0) { 113 g_free(txn); 114 } 115 } 116 117 void job_txn_add_job(JobTxn *txn, Job *job) 118 { 119 if (!txn) { 120 return; 121 } 122 123 assert(!job->txn); 124 job->txn = txn; 125 126 QLIST_INSERT_HEAD(&txn->jobs, job, txn_list); 127 job_txn_ref(txn); 128 } 129 130 static void job_txn_del_job(Job *job) 131 { 132 if (job->txn) { 133 QLIST_REMOVE(job, txn_list); 134 job_txn_unref(job->txn); 135 job->txn = NULL; 136 } 137 } 138 139 static int job_txn_apply(Job *job, int fn(Job *)) 140 { 141 AioContext *inner_ctx; 142 Job *other_job, *next; 143 JobTxn *txn = job->txn; 144 int rc = 0; 145 146 /* 147 * Similar to job_completed_txn_abort, we take each job's lock before 148 * applying fn, but since we assume that outer_ctx is held by the caller, 149 * we need to release it here to avoid holding the lock twice - which would 150 * break AIO_WAIT_WHILE from within fn. 151 */ 152 job_ref(job); 153 aio_context_release(job->aio_context); 154 155 QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { 156 inner_ctx = other_job->aio_context; 157 aio_context_acquire(inner_ctx); 158 rc = fn(other_job); 159 aio_context_release(inner_ctx); 160 if (rc) { 161 break; 162 } 163 } 164 165 /* 166 * Note that job->aio_context might have been changed by calling fn, so we 167 * can't use a local variable to cache it. 168 */ 169 aio_context_acquire(job->aio_context); 170 job_unref(job); 171 return rc; 172 } 173 174 bool job_is_internal(Job *job) 175 { 176 return (job->id == NULL); 177 } 178 179 static void job_state_transition(Job *job, JobStatus s1) 180 { 181 JobStatus s0 = job->status; 182 assert(s1 >= 0 && s1 < JOB_STATUS__MAX); 183 trace_job_state_transition(job, job->ret, 184 JobSTT[s0][s1] ? "allowed" : "disallowed", 185 JobStatus_str(s0), JobStatus_str(s1)); 186 assert(JobSTT[s0][s1]); 187 job->status = s1; 188 189 if (!job_is_internal(job) && s1 != s0) { 190 qapi_event_send_job_status_change(job->id, job->status); 191 } 192 } 193 194 int job_apply_verb(Job *job, JobVerb verb, Error **errp) 195 { 196 JobStatus s0 = job->status; 197 assert(verb >= 0 && verb < JOB_VERB__MAX); 198 trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb), 199 JobVerbTable[verb][s0] ? "allowed" : "prohibited"); 200 if (JobVerbTable[verb][s0]) { 201 return 0; 202 } 203 error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'", 204 job->id, JobStatus_str(s0), JobVerb_str(verb)); 205 return -EPERM; 206 } 207 208 JobType job_type(const Job *job) 209 { 210 return job->driver->job_type; 211 } 212 213 const char *job_type_str(const Job *job) 214 { 215 return JobType_str(job_type(job)); 216 } 217 218 bool job_is_cancelled(Job *job) 219 { 220 return job->cancelled; 221 } 222 223 bool job_is_ready(Job *job) 224 { 225 switch (job->status) { 226 case JOB_STATUS_UNDEFINED: 227 case JOB_STATUS_CREATED: 228 case JOB_STATUS_RUNNING: 229 case JOB_STATUS_PAUSED: 230 case JOB_STATUS_WAITING: 231 case JOB_STATUS_PENDING: 232 case JOB_STATUS_ABORTING: 233 case JOB_STATUS_CONCLUDED: 234 case JOB_STATUS_NULL: 235 return false; 236 case JOB_STATUS_READY: 237 case JOB_STATUS_STANDBY: 238 return true; 239 default: 240 g_assert_not_reached(); 241 } 242 return false; 243 } 244 245 bool job_is_completed(Job *job) 246 { 247 switch (job->status) { 248 case JOB_STATUS_UNDEFINED: 249 case JOB_STATUS_CREATED: 250 case JOB_STATUS_RUNNING: 251 case JOB_STATUS_PAUSED: 252 case JOB_STATUS_READY: 253 case JOB_STATUS_STANDBY: 254 return false; 255 case JOB_STATUS_WAITING: 256 case JOB_STATUS_PENDING: 257 case JOB_STATUS_ABORTING: 258 case JOB_STATUS_CONCLUDED: 259 case JOB_STATUS_NULL: 260 return true; 261 default: 262 g_assert_not_reached(); 263 } 264 return false; 265 } 266 267 static bool job_started(Job *job) 268 { 269 return job->co; 270 } 271 272 static bool job_should_pause(Job *job) 273 { 274 return job->pause_count > 0; 275 } 276 277 Job *job_next(Job *job) 278 { 279 if (!job) { 280 return QLIST_FIRST(&jobs); 281 } 282 return QLIST_NEXT(job, job_list); 283 } 284 285 Job *job_get(const char *id) 286 { 287 Job *job; 288 289 QLIST_FOREACH(job, &jobs, job_list) { 290 if (job->id && !strcmp(id, job->id)) { 291 return job; 292 } 293 } 294 295 return NULL; 296 } 297 298 static void job_sleep_timer_cb(void *opaque) 299 { 300 Job *job = opaque; 301 302 job_enter(job); 303 } 304 305 void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, 306 AioContext *ctx, int flags, BlockCompletionFunc *cb, 307 void *opaque, Error **errp) 308 { 309 Job *job; 310 311 if (job_id) { 312 if (flags & JOB_INTERNAL) { 313 error_setg(errp, "Cannot specify job ID for internal job"); 314 return NULL; 315 } 316 if (!id_wellformed(job_id)) { 317 error_setg(errp, "Invalid job ID '%s'", job_id); 318 return NULL; 319 } 320 if (job_get(job_id)) { 321 error_setg(errp, "Job ID '%s' already in use", job_id); 322 return NULL; 323 } 324 } else if (!(flags & JOB_INTERNAL)) { 325 error_setg(errp, "An explicit job ID is required"); 326 return NULL; 327 } 328 329 job = g_malloc0(driver->instance_size); 330 job->driver = driver; 331 job->id = g_strdup(job_id); 332 job->refcnt = 1; 333 job->aio_context = ctx; 334 job->busy = false; 335 job->paused = true; 336 job->pause_count = 1; 337 job->auto_finalize = !(flags & JOB_MANUAL_FINALIZE); 338 job->auto_dismiss = !(flags & JOB_MANUAL_DISMISS); 339 job->cb = cb; 340 job->opaque = opaque; 341 342 progress_init(&job->progress); 343 344 notifier_list_init(&job->on_finalize_cancelled); 345 notifier_list_init(&job->on_finalize_completed); 346 notifier_list_init(&job->on_pending); 347 notifier_list_init(&job->on_ready); 348 349 job_state_transition(job, JOB_STATUS_CREATED); 350 aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, 351 QEMU_CLOCK_REALTIME, SCALE_NS, 352 job_sleep_timer_cb, job); 353 354 QLIST_INSERT_HEAD(&jobs, job, job_list); 355 356 /* Single jobs are modeled as single-job transactions for sake of 357 * consolidating the job management logic */ 358 if (!txn) { 359 txn = job_txn_new(); 360 job_txn_add_job(txn, job); 361 job_txn_unref(txn); 362 } else { 363 job_txn_add_job(txn, job); 364 } 365 366 return job; 367 } 368 369 void job_ref(Job *job) 370 { 371 ++job->refcnt; 372 } 373 374 void job_unref(Job *job) 375 { 376 if (--job->refcnt == 0) { 377 assert(job->status == JOB_STATUS_NULL); 378 assert(!timer_pending(&job->sleep_timer)); 379 assert(!job->txn); 380 381 if (job->driver->free) { 382 job->driver->free(job); 383 } 384 385 QLIST_REMOVE(job, job_list); 386 387 progress_destroy(&job->progress); 388 error_free(job->err); 389 g_free(job->id); 390 g_free(job); 391 } 392 } 393 394 void job_progress_update(Job *job, uint64_t done) 395 { 396 progress_work_done(&job->progress, done); 397 } 398 399 void job_progress_set_remaining(Job *job, uint64_t remaining) 400 { 401 progress_set_remaining(&job->progress, remaining); 402 } 403 404 void job_progress_increase_remaining(Job *job, uint64_t delta) 405 { 406 progress_increase_remaining(&job->progress, delta); 407 } 408 409 void job_event_cancelled(Job *job) 410 { 411 notifier_list_notify(&job->on_finalize_cancelled, job); 412 } 413 414 void job_event_completed(Job *job) 415 { 416 notifier_list_notify(&job->on_finalize_completed, job); 417 } 418 419 static void job_event_pending(Job *job) 420 { 421 notifier_list_notify(&job->on_pending, job); 422 } 423 424 static void job_event_ready(Job *job) 425 { 426 notifier_list_notify(&job->on_ready, job); 427 } 428 429 static void job_event_idle(Job *job) 430 { 431 notifier_list_notify(&job->on_idle, job); 432 } 433 434 void job_enter_cond(Job *job, bool(*fn)(Job *job)) 435 { 436 if (!job_started(job)) { 437 return; 438 } 439 if (job->deferred_to_main_loop) { 440 return; 441 } 442 443 job_lock(); 444 if (job->busy) { 445 job_unlock(); 446 return; 447 } 448 449 if (fn && !fn(job)) { 450 job_unlock(); 451 return; 452 } 453 454 assert(!job->deferred_to_main_loop); 455 timer_del(&job->sleep_timer); 456 job->busy = true; 457 job_unlock(); 458 aio_co_enter(job->aio_context, job->co); 459 } 460 461 void job_enter(Job *job) 462 { 463 job_enter_cond(job, NULL); 464 } 465 466 /* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds. 467 * Reentering the job coroutine with job_enter() before the timer has expired 468 * is allowed and cancels the timer. 469 * 470 * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be 471 * called explicitly. */ 472 static void coroutine_fn job_do_yield(Job *job, uint64_t ns) 473 { 474 job_lock(); 475 if (ns != -1) { 476 timer_mod(&job->sleep_timer, ns); 477 } 478 job->busy = false; 479 job_event_idle(job); 480 job_unlock(); 481 qemu_coroutine_yield(); 482 483 /* Set by job_enter_cond() before re-entering the coroutine. */ 484 assert(job->busy); 485 } 486 487 void coroutine_fn job_pause_point(Job *job) 488 { 489 assert(job && job_started(job)); 490 491 if (!job_should_pause(job)) { 492 return; 493 } 494 if (job_is_cancelled(job)) { 495 return; 496 } 497 498 if (job->driver->pause) { 499 job->driver->pause(job); 500 } 501 502 if (job_should_pause(job) && !job_is_cancelled(job)) { 503 JobStatus status = job->status; 504 job_state_transition(job, status == JOB_STATUS_READY 505 ? JOB_STATUS_STANDBY 506 : JOB_STATUS_PAUSED); 507 job->paused = true; 508 job_do_yield(job, -1); 509 job->paused = false; 510 job_state_transition(job, status); 511 } 512 513 if (job->driver->resume) { 514 job->driver->resume(job); 515 } 516 } 517 518 void job_yield(Job *job) 519 { 520 assert(job->busy); 521 522 /* Check cancellation *before* setting busy = false, too! */ 523 if (job_is_cancelled(job)) { 524 return; 525 } 526 527 if (!job_should_pause(job)) { 528 job_do_yield(job, -1); 529 } 530 531 job_pause_point(job); 532 } 533 534 void coroutine_fn job_sleep_ns(Job *job, int64_t ns) 535 { 536 assert(job->busy); 537 538 /* Check cancellation *before* setting busy = false, too! */ 539 if (job_is_cancelled(job)) { 540 return; 541 } 542 543 if (!job_should_pause(job)) { 544 job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns); 545 } 546 547 job_pause_point(job); 548 } 549 550 /* Assumes the block_job_mutex is held */ 551 static bool job_timer_not_pending(Job *job) 552 { 553 return !timer_pending(&job->sleep_timer); 554 } 555 556 void job_pause(Job *job) 557 { 558 job->pause_count++; 559 if (!job->paused) { 560 job_enter(job); 561 } 562 } 563 564 void job_resume(Job *job) 565 { 566 assert(job->pause_count > 0); 567 job->pause_count--; 568 if (job->pause_count) { 569 return; 570 } 571 572 /* kick only if no timer is pending */ 573 job_enter_cond(job, job_timer_not_pending); 574 } 575 576 void job_user_pause(Job *job, Error **errp) 577 { 578 if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) { 579 return; 580 } 581 if (job->user_paused) { 582 error_setg(errp, "Job is already paused"); 583 return; 584 } 585 job->user_paused = true; 586 job_pause(job); 587 } 588 589 bool job_user_paused(Job *job) 590 { 591 return job->user_paused; 592 } 593 594 void job_user_resume(Job *job, Error **errp) 595 { 596 assert(job); 597 if (!job->user_paused || job->pause_count <= 0) { 598 error_setg(errp, "Can't resume a job that was not paused"); 599 return; 600 } 601 if (job_apply_verb(job, JOB_VERB_RESUME, errp)) { 602 return; 603 } 604 if (job->driver->user_resume) { 605 job->driver->user_resume(job); 606 } 607 job->user_paused = false; 608 job_resume(job); 609 } 610 611 static void job_do_dismiss(Job *job) 612 { 613 assert(job); 614 job->busy = false; 615 job->paused = false; 616 job->deferred_to_main_loop = true; 617 618 job_txn_del_job(job); 619 620 job_state_transition(job, JOB_STATUS_NULL); 621 job_unref(job); 622 } 623 624 void job_dismiss(Job **jobptr, Error **errp) 625 { 626 Job *job = *jobptr; 627 /* similarly to _complete, this is QMP-interface only. */ 628 assert(job->id); 629 if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) { 630 return; 631 } 632 633 job_do_dismiss(job); 634 *jobptr = NULL; 635 } 636 637 void job_early_fail(Job *job) 638 { 639 assert(job->status == JOB_STATUS_CREATED); 640 job_do_dismiss(job); 641 } 642 643 static void job_conclude(Job *job) 644 { 645 job_state_transition(job, JOB_STATUS_CONCLUDED); 646 if (job->auto_dismiss || !job_started(job)) { 647 job_do_dismiss(job); 648 } 649 } 650 651 static void job_update_rc(Job *job) 652 { 653 if (!job->ret && job_is_cancelled(job)) { 654 job->ret = -ECANCELED; 655 } 656 if (job->ret) { 657 if (!job->err) { 658 error_setg(&job->err, "%s", strerror(-job->ret)); 659 } 660 job_state_transition(job, JOB_STATUS_ABORTING); 661 } 662 } 663 664 static void job_commit(Job *job) 665 { 666 assert(!job->ret); 667 if (job->driver->commit) { 668 job->driver->commit(job); 669 } 670 } 671 672 static void job_abort(Job *job) 673 { 674 assert(job->ret); 675 if (job->driver->abort) { 676 job->driver->abort(job); 677 } 678 } 679 680 static void job_clean(Job *job) 681 { 682 if (job->driver->clean) { 683 job->driver->clean(job); 684 } 685 } 686 687 static int job_finalize_single(Job *job) 688 { 689 assert(job_is_completed(job)); 690 691 /* Ensure abort is called for late-transactional failures */ 692 job_update_rc(job); 693 694 if (!job->ret) { 695 job_commit(job); 696 } else { 697 job_abort(job); 698 } 699 job_clean(job); 700 701 if (job->cb) { 702 job->cb(job->opaque, job->ret); 703 } 704 705 /* Emit events only if we actually started */ 706 if (job_started(job)) { 707 if (job_is_cancelled(job)) { 708 job_event_cancelled(job); 709 } else { 710 job_event_completed(job); 711 } 712 } 713 714 job_txn_del_job(job); 715 job_conclude(job); 716 return 0; 717 } 718 719 static void job_cancel_async(Job *job, bool force) 720 { 721 if (job->driver->cancel) { 722 job->driver->cancel(job, force); 723 } 724 if (job->user_paused) { 725 /* Do not call job_enter here, the caller will handle it. */ 726 if (job->driver->user_resume) { 727 job->driver->user_resume(job); 728 } 729 job->user_paused = false; 730 assert(job->pause_count > 0); 731 job->pause_count--; 732 } 733 job->cancelled = true; 734 /* To prevent 'force == false' overriding a previous 'force == true' */ 735 job->force_cancel |= force; 736 } 737 738 static void job_completed_txn_abort(Job *job) 739 { 740 AioContext *outer_ctx = job->aio_context; 741 AioContext *ctx; 742 JobTxn *txn = job->txn; 743 Job *other_job; 744 745 if (txn->aborting) { 746 /* 747 * We are cancelled by another job, which will handle everything. 748 */ 749 return; 750 } 751 txn->aborting = true; 752 job_txn_ref(txn); 753 754 /* We can only hold the single job's AioContext lock while calling 755 * job_finalize_single() because the finalization callbacks can involve 756 * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */ 757 aio_context_release(outer_ctx); 758 759 /* Other jobs are effectively cancelled by us, set the status for 760 * them; this job, however, may or may not be cancelled, depending 761 * on the caller, so leave it. */ 762 QLIST_FOREACH(other_job, &txn->jobs, txn_list) { 763 if (other_job != job) { 764 ctx = other_job->aio_context; 765 aio_context_acquire(ctx); 766 job_cancel_async(other_job, false); 767 aio_context_release(ctx); 768 } 769 } 770 while (!QLIST_EMPTY(&txn->jobs)) { 771 other_job = QLIST_FIRST(&txn->jobs); 772 ctx = other_job->aio_context; 773 aio_context_acquire(ctx); 774 if (!job_is_completed(other_job)) { 775 assert(job_is_cancelled(other_job)); 776 job_finish_sync(other_job, NULL, NULL); 777 } 778 job_finalize_single(other_job); 779 aio_context_release(ctx); 780 } 781 782 aio_context_acquire(outer_ctx); 783 784 job_txn_unref(txn); 785 } 786 787 static int job_prepare(Job *job) 788 { 789 if (job->ret == 0 && job->driver->prepare) { 790 job->ret = job->driver->prepare(job); 791 job_update_rc(job); 792 } 793 return job->ret; 794 } 795 796 static int job_needs_finalize(Job *job) 797 { 798 return !job->auto_finalize; 799 } 800 801 static void job_do_finalize(Job *job) 802 { 803 int rc; 804 assert(job && job->txn); 805 806 /* prepare the transaction to complete */ 807 rc = job_txn_apply(job, job_prepare); 808 if (rc) { 809 job_completed_txn_abort(job); 810 } else { 811 job_txn_apply(job, job_finalize_single); 812 } 813 } 814 815 void job_finalize(Job *job, Error **errp) 816 { 817 assert(job && job->id); 818 if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) { 819 return; 820 } 821 job_do_finalize(job); 822 } 823 824 static int job_transition_to_pending(Job *job) 825 { 826 job_state_transition(job, JOB_STATUS_PENDING); 827 if (!job->auto_finalize) { 828 job_event_pending(job); 829 } 830 return 0; 831 } 832 833 void job_transition_to_ready(Job *job) 834 { 835 job_state_transition(job, JOB_STATUS_READY); 836 job_event_ready(job); 837 } 838 839 static void job_completed_txn_success(Job *job) 840 { 841 JobTxn *txn = job->txn; 842 Job *other_job; 843 844 job_state_transition(job, JOB_STATUS_WAITING); 845 846 /* 847 * Successful completion, see if there are other running jobs in this 848 * txn. 849 */ 850 QLIST_FOREACH(other_job, &txn->jobs, txn_list) { 851 if (!job_is_completed(other_job)) { 852 return; 853 } 854 assert(other_job->ret == 0); 855 } 856 857 job_txn_apply(job, job_transition_to_pending); 858 859 /* If no jobs need manual finalization, automatically do so */ 860 if (job_txn_apply(job, job_needs_finalize) == 0) { 861 job_do_finalize(job); 862 } 863 } 864 865 static void job_completed(Job *job) 866 { 867 assert(job && job->txn && !job_is_completed(job)); 868 869 job_update_rc(job); 870 trace_job_completed(job, job->ret); 871 if (job->ret) { 872 job_completed_txn_abort(job); 873 } else { 874 job_completed_txn_success(job); 875 } 876 } 877 878 /** Useful only as a type shim for aio_bh_schedule_oneshot. */ 879 static void job_exit(void *opaque) 880 { 881 Job *job = (Job *)opaque; 882 AioContext *ctx; 883 884 job_ref(job); 885 aio_context_acquire(job->aio_context); 886 887 /* This is a lie, we're not quiescent, but still doing the completion 888 * callbacks. However, completion callbacks tend to involve operations that 889 * drain block nodes, and if .drained_poll still returned true, we would 890 * deadlock. */ 891 job->busy = false; 892 job_event_idle(job); 893 894 job_completed(job); 895 896 /* 897 * Note that calling job_completed can move the job to a different 898 * aio_context, so we cannot cache from above. job_txn_apply takes care of 899 * acquiring the new lock, and we ref/unref to avoid job_completed freeing 900 * the job underneath us. 901 */ 902 ctx = job->aio_context; 903 job_unref(job); 904 aio_context_release(ctx); 905 } 906 907 /** 908 * All jobs must allow a pause point before entering their job proper. This 909 * ensures that jobs can be paused prior to being started, then resumed later. 910 */ 911 static void coroutine_fn job_co_entry(void *opaque) 912 { 913 Job *job = opaque; 914 915 assert(job && job->driver && job->driver->run); 916 job_pause_point(job); 917 job->ret = job->driver->run(job, &job->err); 918 job->deferred_to_main_loop = true; 919 job->busy = true; 920 aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job); 921 } 922 923 void job_start(Job *job) 924 { 925 assert(job && !job_started(job) && job->paused && 926 job->driver && job->driver->run); 927 job->co = qemu_coroutine_create(job_co_entry, job); 928 job->pause_count--; 929 job->busy = true; 930 job->paused = false; 931 job_state_transition(job, JOB_STATUS_RUNNING); 932 aio_co_enter(job->aio_context, job->co); 933 } 934 935 void job_cancel(Job *job, bool force) 936 { 937 if (job->status == JOB_STATUS_CONCLUDED) { 938 job_do_dismiss(job); 939 return; 940 } 941 job_cancel_async(job, force); 942 if (!job_started(job)) { 943 job_completed(job); 944 } else if (job->deferred_to_main_loop) { 945 job_completed_txn_abort(job); 946 } else { 947 job_enter(job); 948 } 949 } 950 951 void job_user_cancel(Job *job, bool force, Error **errp) 952 { 953 if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) { 954 return; 955 } 956 job_cancel(job, force); 957 } 958 959 /* A wrapper around job_cancel() taking an Error ** parameter so it may be 960 * used with job_finish_sync() without the need for (rather nasty) function 961 * pointer casts there. */ 962 static void job_cancel_err(Job *job, Error **errp) 963 { 964 job_cancel(job, false); 965 } 966 967 int job_cancel_sync(Job *job) 968 { 969 return job_finish_sync(job, &job_cancel_err, NULL); 970 } 971 972 void job_cancel_sync_all(void) 973 { 974 Job *job; 975 AioContext *aio_context; 976 977 while ((job = job_next(NULL))) { 978 aio_context = job->aio_context; 979 aio_context_acquire(aio_context); 980 job_cancel_sync(job); 981 aio_context_release(aio_context); 982 } 983 } 984 985 int job_complete_sync(Job *job, Error **errp) 986 { 987 return job_finish_sync(job, job_complete, errp); 988 } 989 990 void job_complete(Job *job, Error **errp) 991 { 992 /* Should not be reachable via external interface for internal jobs */ 993 assert(job->id); 994 if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) { 995 return; 996 } 997 if (job_is_cancelled(job) || !job->driver->complete) { 998 error_setg(errp, "The active block job '%s' cannot be completed", 999 job->id); 1000 return; 1001 } 1002 1003 job->driver->complete(job, errp); 1004 } 1005 1006 int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) 1007 { 1008 Error *local_err = NULL; 1009 int ret; 1010 1011 job_ref(job); 1012 1013 if (finish) { 1014 finish(job, &local_err); 1015 } 1016 if (local_err) { 1017 error_propagate(errp, local_err); 1018 job_unref(job); 1019 return -EBUSY; 1020 } 1021 1022 AIO_WAIT_WHILE(job->aio_context, 1023 (job_enter(job), !job_is_completed(job))); 1024 1025 ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret; 1026 job_unref(job); 1027 return ret; 1028 } 1029