/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;

static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor. The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed). The consistency is achieved with
 * aio_context_acquire/release. These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer. These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
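
/*
 * Informal sketch of the monitor-side pattern described above (hypothetical
 * caller; the real QMP handlers live outside this file, e.g. in blockdev.c):
 *
 *     BlockJob *job = block_job_get(id);
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, speed, errp);
 *         aio_context_release(ctx);
 *     }
 */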

BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}

static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}
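
/* Callbacks installed on the job's BdrvChildren (informal note): draining any
 * of the job's nodes pauses the job, and ending the drain resumes it; see
 * child_job_drained_begin/end below. */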

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .drained_begin = child_job_drained_begin,
    .drained_end = child_job_drained_end,
    .stay_at_node = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}
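
/*
 * Informal lifecycle sketch, derived from the functions below:
 * block_job_create() returns a job that is paused and not yet started,
 * block_job_start() creates the coroutine and enters it, and the driver's
 * .start function eventually reports the result with block_job_completed(),
 * typically after bouncing to the main loop with block_job_defer_to_main_loop().
 */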

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}
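
/*
 * Informal summary of the transaction rules implemented below: a transaction
 * commits only once every job in it has completed successfully; the first job
 * that fails or is cancelled aborts the whole transaction and cancels the
 * remaining jobs.
 */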

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device = g_strdup(job->id);
    info->len = job->len;
    info->busy = atomic_read(&job->busy);
    info->paused = job->pause_count > 0;
    info->offset = job->offset;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

/*
 * API for block job drivers and the block layer. These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver = driver;
    job->id = g_strdup(job_id);
    job->blk = blk;
    job->cb = cb;
    job->opaque = opaque;
    job->busy = false;
    job->paused = true;
    job->pause_count = 1;
    job->refcnt = 1;
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}
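
/*
 * Informal sketch of a typical creation sequence (ExampleJob and
 * example_job_driver are hypothetical, and ExampleJob is assumed to embed a
 * BlockJob named 'common'; real callers are the job drivers and blockdev.c):
 *
 *     ExampleJob *s = block_job_create(job_id, &example_job_driver, bs,
 *                                      perm, shared_perm, speed, 0,
 *                                      cb, opaque, errp);
 *     if (s) {
 *         ... initialise driver-specific fields of *s ...
 *         block_job_start(&s->common);
 *     }
 */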

void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine. */
    assert(job->busy);
}

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}

/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}

void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}
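
/*
 * Informal sketch of how a driver's .start coroutine typically combines the
 * functions above (example_run, example_complete_cb, work_remaining,
 * do_some_io and DELAY_NS are hypothetical; see the stream/mirror/backup
 * drivers for real users):
 *
 *     static void coroutine_fn example_run(BlockJob *job)
 *     {
 *         while (!block_job_is_cancelled(job) && work_remaining(job)) {
 *             do_some_io(job);
 *             block_job_sleep_ns(job, DELAY_NS);   -- yields, honours pauses
 *         }
 *         block_job_defer_to_main_loop(job, example_complete_cb, job);
 *     }
 */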

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* make the pause user visible, which will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}