/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
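
/*
 * Hypothetical sketch (not part of this file) of the monitor-side pattern
 * described above: a job looked up with block_job_get() is only safe to
 * operate on while its AioContext is held.  The job ID "job0" and the
 * error handling are illustrative assumptions.
 *
 *     BlockJob *job = block_job_get("job0");
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *         Error *err = NULL;
 *
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, 16 * 1024 * 1024, &err);
 *         aio_context_release(ctx);
 *     }
 */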

BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}

static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        QLIST_REMOVE(job, job_list);
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If the job is not busy (job->busy is false), this kicks it into its
     * next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}
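
/*
 * Hypothetical sketch of the transaction helpers above: two jobs created
 * elsewhere (job1 and job2 are assumptions) are grouped so they complete
 * or abort as a unit, and the creator then drops its own reference.
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     block_job_txn_add_job(txn, job1);
 *     block_job_txn_add_job(txn, job2);
 *     block_job_txn_unref(txn);
 */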

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_lookup[job->driver->job_type],
                           job->id);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .stay_at_node = true,
};

static void block_job_drained_begin(void *opaque)
{
    BlockJob *job = opaque;
    block_job_pause(job);
}

static void block_job_drained_end(void *opaque)
{
    BlockJob *job = opaque;
    block_job_resume(job);
}

static const BlockDevOps block_job_dev_ops = {
    .drained_begin = block_job_drained_begin,
    .drained_end = block_job_drained_end,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}
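
/*
 * Hypothetical sketch of block_job_add_bdrv(): a driver that writes to a
 * second node registers it so the job's op blocker and permissions cover
 * that node too.  target_bs and the permission masks are assumptions for
 * illustration.
 *
 *     ret = block_job_add_bdrv(job, "target", target_bs,
 *                              BLK_PERM_WRITE, BLK_PERM_ALL, errp);
 *     if (ret < 0) {
 *         ... fail job creation ...
 *     }
 */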

/**
 * All jobs must allow a pause point before entering their job proper.  This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}
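
/*
 * Hypothetical sketch of a driver's .start coroutine, showing the shape
 * this file assumes: periodic cancellation checks and pause points while
 * work remains, then completion reported via the main loop.  ExampleJob,
 * its fields, and example_job_complete are illustrative assumptions.
 *
 *     static void coroutine_fn example_job_start(BlockJob *job)
 *     {
 *         ExampleJob *s = container_of(job, ExampleJob, common);
 *
 *         while (s->work_left && !block_job_is_cancelled(job)) {
 *             ... do one chunk of I/O, record errors in s->ret ...
 *             block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, s->delay_ns);
 *         }
 *         block_job_defer_to_main_loop(job, example_job_complete, s);
 *     }
 */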

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}
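
/*
 * Hypothetical sketch of the user pause/resume pair above, as a QMP-style
 * caller would use it under the AioContext locking shown at the top of the
 * file.  Note that pausing is asynchronous: the job only stops at its next
 * pause point.
 *
 *     block_job_user_pause(job);
 *     ... later ...
 *     if (block_job_user_paused(job)) {
 *         block_job_user_resume(job);
 *     }
 */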

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may
 * be used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(BlockJobType_lookup[job->driver->job_type]);
    info->device = g_strdup(job->id);
    info->len = job->len;
    info->busy = job->busy;
    info->paused = job->pause_count > 0;
    info->offset = job->offset;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}
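
/*
 * Hypothetical sketch: tearing down a single job synchronously from the
 * monitor, caching the AioContext first since block_job_cancel_sync() may
 * drop the final job reference (compare block_job_cancel_sync_all() above).
 * The return value follows block_job_finish_sync(): -ECANCELED when the
 * job was cancelled cleanly.
 *
 *     AioContext *ctx = blk_get_aio_context(job->blk);
 *
 *     aio_context_acquire(ctx);
 *     ret = block_job_cancel_sync(job);
 *     aio_context_release(ctx);
 */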

/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver = driver;
    job->id = g_strdup(job_id);
    job->blk = blk;
    job->cb = cb;
    job->opaque = opaque;
    job->busy = false;
    job->paused = true;
    job->pause_count = 1;
    job->refcnt = 1;

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_lookup[driver->job_type]);
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    blk_set_dev_ops(blk, &block_job_dev_ops, job);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}

void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}
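
/*
 * Hypothetical sketch of how a job driver would pair block_job_create()
 * with block_job_start().  ExampleJob, example_job_driver and the
 * permission masks are assumptions for illustration; the job starts
 * paused (pause_count == 1) until block_job_start() runs it.
 *
 *     ExampleJob *s;
 *
 *     s = block_job_create(job_id, &example_job_driver, bs,
 *                          BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL,
 *                          speed, BLOCK_JOB_DEFAULT, cb, opaque, errp);
 *     if (!s) {
 *         return;
 *     }
 *     block_job_start(&s->common);
 */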

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        job->busy = false;
        qemu_coroutine_yield(); /* wait for block_job_resume() */
        job->busy = true;
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

void block_job_enter(BlockJob *job)
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    if (!job->busy) {
        bdrv_coroutine_enter(blk_bs(job->blk), job->co);
    }
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
    }
    job->busy = true;

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        qemu_coroutine_yield();
    }
    job->busy = true;

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}
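
/*
 * Hypothetical sketch: a mirror-style driver signals readiness once it has
 * converged, then yields until the user completes or cancels the job.
 * s->should_complete is an assumption standing in for driver state set by
 * its .complete callback.
 *
 *     block_job_event_ready(job);
 *     while (!s->should_complete && !block_job_is_cancelled(job)) {
 *         block_job_yield(job);
 *     }
 */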

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Make the pause user-visible, so that it can be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}
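
/*
 * Hypothetical sketch of the deferral pattern above: a driver's coroutine
 * hands its final status to a callback that runs in the main loop, so that
 * completion (and any graph changes it triggers) happens outside the
 * job coroutine.  ExampleJob and its ret field are assumptions for
 * illustration.
 *
 *     static void example_job_complete(BlockJob *job, void *opaque)
 *     {
 *         ExampleJob *s = opaque;
 *
 *         block_job_completed(job, s->ret);
 *     }
 *
 *     ... at the end of the driver's .start coroutine ...
 *     block_job_defer_to_main_loop(job, example_job_complete, s);
 */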