/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, tran, errp);
            if (*errp) {
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
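
/*
 * Illustrative example (not part of the original file): bdrv_merge_limits()
 * combines child limits so the stricter constraint always wins. With two
 * children reporting
 *
 *     a.max_transfer = 1 MiB;  a.min_mem_alignment = 512;
 *     b.max_transfer = 0;      b.min_mem_alignment = 4096;
 *
 * the merged result keeps max_transfer = 1 MiB (MIN_NON_ZERO() treats 0 as
 * "no limit") and min_mem_alignment = 4096 (MAX() keeps the stricter
 * alignment requirement).
 */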

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
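
/*
 * Illustrative sketch (not part of the original file): since the flag is a
 * reference count, independent users just pair the calls and need not
 * coordinate with each other:
 *
 *     bdrv_enable_copy_on_read(bs);   // first user, COR becomes active
 *     bdrv_enable_copy_on_read(bs);   // second user, count is now 2
 *     bdrv_disable_copy_on_read(bs);  // still enabled, count is 1
 *     bdrv_disable_copy_on_read(bs);  // last user gone, COR is off
 */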

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
        (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}
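
/*
 * Illustrative note (not part of the original file): bdrv_co_yield_to_drain()
 * above is an instance of the general "defer to a bottom half, then yield"
 * pattern used whenever coroutine code needs to run something that polls:
 *
 *     data.co = qemu_coroutine_self();
 *     aio_bh_schedule_oneshot(ctx, bh_cb_that_may_poll, &data);
 *     qemu_coroutine_yield();     // the BH runs and then wakes us
 *     assert(data.done);
 *
 * Yielding first guarantees the polling code never runs on top of the
 * caller's coroutine stack, where a nested aio_poll() could deadlock.
 */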

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}
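
/*
 * Illustrative sketch (not part of the original file): a typical drained
 * section around a graph or device change looks like this:
 *
 *     bdrv_drained_begin(bs);    // quiesce parents, poll in-flight I/O
 *     ... modify the graph or device state ...
 *     bdrv_drained_end(bs);      // resume I/O
 *
 * Sections may nest: bs->quiesce_counter tracks the depth, and external
 * events are only re-enabled when the outermost section ends.
 */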

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the
 * BlockDriverState's AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}
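
/*
 * Illustrative note (not part of the original file): bdrv_drain_all_poll()
 * is written as a condition callback. bdrv_drain_all_begin() below pairs it
 * with the usual main-loop wait idiom:
 *
 *     AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
 *
 * The condition is re-evaluated after every event, so the wait finishes as
 * soon as no node has in-flight requests and no parent reports activity
 * through its .drained_poll() callback.
 */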

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the in-flight I/O requests to
     * finish could block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the in-flight I/O requests to
     * finish could block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
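
/*
 * Illustrative sketch (not part of the original file): code that must stop
 * all guest I/O globally, e.g. while taking a consistent snapshot of every
 * disk, brackets the operation with the begin/end pair:
 *
 *     bdrv_drain_all_begin();
 *     ... operate on the quiesced block graph ...
 *     bdrv_drain_all_end();
 *
 * bdrv_drain_all() above is simply this pair with an empty critical section.
 */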

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  int64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
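
/*
 * Worked example (not part of the original file): for a tracked request
 * with overlap_offset = 4096 and overlap_bytes = 4096, i.e. the half-open
 * byte range [4096, 8192):
 *
 *     tracked_request_overlaps(req, 0, 4096)    -> false (ends at 4096)
 *     tracked_request_overlaps(req, 8192, 512)  -> false (starts at the end)
 *     tracked_request_overlaps(req, 4000, 200)  -> true  (crosses 4096)
 *
 * Ranges that merely touch do not conflict because both bounds are
 * exclusive at the end.
 */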

/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool waited = false;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
        waited = true;
    }

    return waited;
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    bool waited;

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    waited = bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);

    return waited;
}
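
/*
 * Worked example (not part of the original file): with a 64 KiB cluster
 * size, bdrv_round_to_clusters(bs, 70000, 1000, ...) above yields
 *
 *     *cluster_offset = QEMU_ALIGN_DOWN(70000, 65536)              = 65536
 *     *cluster_bytes  = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536
 *
 * i.e. the request [70000, 71000) grows to the one cluster that fully
 * contains it, [65536, 131072).
 */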

static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                                   QEMUIOVector *qiov, size_t qiov_offset,
                                   Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}
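
/*
 * Worked examples (not part of the original file): bdrv_check_qiov_request()
 * above fails with -EIO and a descriptive error for, among others:
 *
 *     offset = -1                                   (negative offset)
 *     bytes  = BDRV_MAX_LENGTH + 1                  (bytes too large)
 *     offset = BDRV_MAX_LENGTH - 512, bytes = 1024  (sum exceeds maximum)
 *     bytes  = qiov->size - qiov_offset + 1         (overflows the iovec)
 *
 * bdrv_check_request32() additionally caps bytes at BDRV_REQUEST_MAX_BYTES
 * for callers that still track request sizes in 32 bits.
 */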

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
                int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}
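
/*
 * Illustrative sketch (not part of the original file): a typical
 * read-modify-write of a 512-byte metadata header using these helpers:
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ... update fields in header ...
 *     ret = bdrv_pwrite(child, 0, header, sizeof(header));
 *
 * Both helpers return the byte count on success and a negative errno on
 * failure; bdrv_pwrite_sync() below adds a flush for barrier semantics.
 */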

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
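
/*
 * Illustrative note (not part of the original file): CoroutineIOCompletion
 * is the standard bridge from callback-style AIO to coroutine style, as
 * used by bdrv_driver_preadv() above:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *
 *     acb = drv->bdrv_aio_preadv(..., bdrv_co_io_em_complete, &co);
 *     qemu_coroutine_yield();   // resumed by bdrv_co_io_em_complete()
 *     ret = co.ret;
 *
 * The completion callback records the return value and wakes the coroutine,
 * giving the caller a synchronous-looking interface.
 */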

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
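
/*
 * Illustrative note (not part of the original file): the emulate_flags path
 * in bdrv_driver_pwritev() above shows how unsupported flags degrade
 * gracefully. If a driver does not advertise BDRV_REQ_FUA in
 * bs->supported_write_flags, the flag survives
 *
 *     flags &= ~bs->supported_write_flags;
 *
 * and is then emulated with an explicit flush after a successful write:
 *
 *     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
 *         ret = bdrv_co_flush(bs);
 *     }
 */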

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them.
         */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~bs->supported_read_flags));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
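
/*
 * Worked example (not part of the original file): reading offset = 512,
 * bytes = 1024 with align = 512 from a 1000-byte image, the code above
 * computes
 *
 *     max_bytes = ROUND_UP(1000 - 512, 512) = 512
 *
 * so only [512, 1024) is read from the driver; the remaining 512 bytes of
 * the request lie beyond EOF and are zero-filled by the qemu_iovec_memset()
 * branch of the fragmentation loop.
 */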

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*---------- ... -----------*-----$-------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf ... )                               [tail_buf  )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}

static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
                                 bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
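
/*
 * Worked example (not part of the original file): with align = 512,
 * offset = 1000 and bytes = 2000, bdrv_init_padding() above computes
 *
 *     head    = 1000 % 512          = 488
 *     tail    = 512 - (3000 % 512)  = 72
 *     sum     = 488 + 2000 + 72     = 2560
 *     buf_len = 2 * align           = 1024   (head and tail, sum > align)
 *
 * merge_reads stays false because sum != buf_len, and the padded request
 * covers [512, 3072) instead of the original [1000, 3000).
 */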

/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. It does not
 * include the RMW read of padding; call bdrv_padding_rmw_read() separately
 * if needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }

    return 0;
}

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver has
         * a special meaning for zero length (like
         * qcow2_co_pwritev_compressed_part), we can't pass the request to the
         * driver due to request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL);
    if (ret < 0) {
        /* The in-flight counter was already incremented above; drop it on
         * this error path so a failed padding setup does not leak it. */
        bdrv_dec_in_flight(bs);
        return ret;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);

    return ret;
}

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwritev() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}

static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
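
/*
 * Worked example (not part of the original file): with alignment = 4096,
 * offset = 1000 and bytes = 20000, and assuming generous max_write_zeroes
 * and max_transfer limits, the loop in bdrv_co_do_pwrite_zeroes() above
 * issues three requests:
 *
 *     head chunk:    [1000, 4096)    3096 bytes, unaligned
 *     aligned body:  [4096, 20480)  16384 bytes
 *     tail chunk:    [20480, 21000)   520 bytes, unaligned
 *
 * The bulk of the zeroing stays aligned and at most one unaligned request
 * is issued at each end, as the comment in the loop requires.
 */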
DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can
     * extend past the end of the image file, so we cannot assert about
     * BDRV_TRACKED_DISCARD here.  Instead, just skip it, since semantically
     * a discard request beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret =
bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 2130 num, qiov, 2131 qiov_offset + bytes - bytes_remaining, 2132 local_flags); 2133 if (ret < 0) { 2134 break; 2135 } 2136 bytes_remaining -= num; 2137 } 2138 } 2139 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 2140 2141 if (ret >= 0) { 2142 ret = 0; 2143 } 2144 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 2145 2146 return ret; 2147 } 2148 2149 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, 2150 int64_t offset, 2151 int64_t bytes, 2152 BdrvRequestFlags flags, 2153 BdrvTrackedRequest *req) 2154 { 2155 BlockDriverState *bs = child->bs; 2156 QEMUIOVector local_qiov; 2157 uint64_t align = bs->bl.request_alignment; 2158 int ret = 0; 2159 bool padding; 2160 BdrvRequestPadding pad; 2161 2162 padding = bdrv_init_padding(bs, offset, bytes, &pad); 2163 if (padding) { 2164 bdrv_make_request_serialising(req, align); 2165 2166 bdrv_padding_rmw_read(child, req, &pad, true); 2167 2168 if (pad.head || pad.merge_reads) { 2169 int64_t aligned_offset = offset & ~(align - 1); 2170 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; 2171 2172 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 2173 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 2174 align, &local_qiov, 0, 2175 flags & ~BDRV_REQ_ZERO_WRITE); 2176 if (ret < 0 || pad.merge_reads) { 2177 /* Error or all work is done */ 2178 goto out; 2179 } 2180 offset += write_bytes - pad.head; 2181 bytes -= write_bytes - pad.head; 2182 } 2183 } 2184 2185 assert(!bytes || (offset & (align - 1)) == 0); 2186 if (bytes >= align) { 2187 /* Write the aligned part in the middle. */ 2188 int64_t aligned_bytes = bytes & ~(align - 1); 2189 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 2190 NULL, 0, flags); 2191 if (ret < 0) { 2192 goto out; 2193 } 2194 bytes -= aligned_bytes; 2195 offset += aligned_bytes; 2196 } 2197 2198 assert(!bytes || (offset & (align - 1)) == 0); 2199 if (bytes) { 2200 assert(align == pad.tail + bytes); 2201 2202 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); 2203 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 2204 &local_qiov, 0, 2205 flags & ~BDRV_REQ_ZERO_WRITE); 2206 } 2207 2208 out: 2209 bdrv_padding_destroy(&pad); 2210 2211 return ret; 2212 } 2213 2214 /* 2215 * Handle a write request in coroutine context 2216 */ 2217 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 2218 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 2219 BdrvRequestFlags flags) 2220 { 2221 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); 2222 } 2223 2224 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, 2225 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, 2226 BdrvRequestFlags flags) 2227 { 2228 BlockDriverState *bs = child->bs; 2229 BdrvTrackedRequest req; 2230 uint64_t align = bs->bl.request_alignment; 2231 BdrvRequestPadding pad; 2232 int ret; 2233 bool padded = false; 2234 2235 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); 2236 2237 if (!bdrv_is_inserted(bs)) { 2238 return -ENOMEDIUM; 2239 } 2240 2241 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 2242 if (ret < 0) { 2243 return ret; 2244 } 2245 2246 /* If the request is misaligned then we can't make it efficient */ 2247 if ((flags & BDRV_REQ_NO_FALLBACK) && 2248 !QEMU_IS_ALIGNED(offset | bytes, align)) 2249 { 2250 return -ENOTSUP; 2251 } 2252 2253 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 2254 /* 2255 * Aligning zero request is nonsense. 
Even if the driver assigns a special
     * meaning to zero-length requests (as qcow2_co_pwritev_compressed_part
     * does), we can't pass such a request to the driver due to
     * request_alignment.
     *
     * Still, there is no reason to return an error if someone does an
     * unaligned zero-length write occasionally.
     */
        return 0;
    }

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad the request for the following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does its own aligning, so we only
         * pad here when there is no ZERO flag.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                               &padded);
        if (ret < 0) {
            return ret;
        }
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        assert(!padded);
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (padded) {
        /*
         * The request was unaligned to request_alignment and therefore
         * padded.  We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of whether they are reachable via a
 * BlockBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    /*
     * The bdrv queue is managed by record/replay; a flush request created
     * while stopping the VM may break determinism.
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.
If bytes goes 2370 * beyond the end of the disk image it will be clamped; if 'pnum' is set to 2371 * the end of the image, then the returned value will include BDRV_BLOCK_EOF. 2372 * 2373 * 'pnum' is set to the number of bytes (including and immediately 2374 * following the specified offset) that are easily known to be in the 2375 * same allocated/unallocated state. Note that a second call starting 2376 * at the original offset plus returned pnum may have the same status. 2377 * The returned value is non-zero on success except at end-of-file. 2378 * 2379 * Returns negative errno on failure. Otherwise, if the 2380 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are 2381 * set to the host mapping and BDS corresponding to the guest offset. 2382 */ 2383 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, 2384 bool want_zero, 2385 int64_t offset, int64_t bytes, 2386 int64_t *pnum, int64_t *map, 2387 BlockDriverState **file) 2388 { 2389 int64_t total_size; 2390 int64_t n; /* bytes */ 2391 int ret; 2392 int64_t local_map = 0; 2393 BlockDriverState *local_file = NULL; 2394 int64_t aligned_offset, aligned_bytes; 2395 uint32_t align; 2396 bool has_filtered_child; 2397 2398 assert(pnum); 2399 *pnum = 0; 2400 total_size = bdrv_getlength(bs); 2401 if (total_size < 0) { 2402 ret = total_size; 2403 goto early_out; 2404 } 2405 2406 if (offset >= total_size) { 2407 ret = BDRV_BLOCK_EOF; 2408 goto early_out; 2409 } 2410 if (!bytes) { 2411 ret = 0; 2412 goto early_out; 2413 } 2414 2415 n = total_size - offset; 2416 if (n < bytes) { 2417 bytes = n; 2418 } 2419 2420 /* Must be non-NULL or bdrv_getlength() would have failed */ 2421 assert(bs->drv); 2422 has_filtered_child = bdrv_filter_child(bs); 2423 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2424 *pnum = bytes; 2425 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2426 if (offset + bytes == total_size) { 2427 ret |= BDRV_BLOCK_EOF; 2428 } 2429 if (bs->drv->protocol_name) { 2430 ret |= BDRV_BLOCK_OFFSET_VALID; 2431 local_map = offset; 2432 local_file = bs; 2433 } 2434 goto early_out; 2435 } 2436 2437 bdrv_inc_in_flight(bs); 2438 2439 /* Round out to request_alignment boundaries */ 2440 align = bs->bl.request_alignment; 2441 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2442 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2443 2444 if (bs->drv->bdrv_co_block_status) { 2445 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2446 aligned_bytes, pnum, &local_map, 2447 &local_file); 2448 } else { 2449 /* Default code for filters */ 2450 2451 local_file = bdrv_filter_bs(bs); 2452 assert(local_file); 2453 2454 *pnum = aligned_bytes; 2455 local_map = aligned_offset; 2456 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2457 } 2458 if (ret < 0) { 2459 *pnum = 0; 2460 goto out; 2461 } 2462 2463 /* 2464 * The driver's result must be a non-zero multiple of request_alignment. 2465 * Clamp pnum and adjust map to original request. 
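     * For illustration (hypothetical numbers): with align = 512, a query
     * at offset 700 is widened to aligned_offset = 512; if the driver
     * then reports *pnum = 1024, the code below trims the 188-byte head
     * (700 - 512), leaving *pnum = 836, and advances local_map by the
     * same amount so that the mapping still corresponds to the caller's
     * offset.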
2466 */ 2467 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2468 align > offset - aligned_offset); 2469 if (ret & BDRV_BLOCK_RECURSE) { 2470 assert(ret & BDRV_BLOCK_DATA); 2471 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2472 assert(!(ret & BDRV_BLOCK_ZERO)); 2473 } 2474 2475 *pnum -= offset - aligned_offset; 2476 if (*pnum > bytes) { 2477 *pnum = bytes; 2478 } 2479 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2480 local_map += offset - aligned_offset; 2481 } 2482 2483 if (ret & BDRV_BLOCK_RAW) { 2484 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2485 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2486 *pnum, pnum, &local_map, &local_file); 2487 goto out; 2488 } 2489 2490 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2491 ret |= BDRV_BLOCK_ALLOCATED; 2492 } else if (bs->drv->supports_backing) { 2493 BlockDriverState *cow_bs = bdrv_cow_bs(bs); 2494 2495 if (!cow_bs) { 2496 ret |= BDRV_BLOCK_ZERO; 2497 } else if (want_zero) { 2498 int64_t size2 = bdrv_getlength(cow_bs); 2499 2500 if (size2 >= 0 && offset >= size2) { 2501 ret |= BDRV_BLOCK_ZERO; 2502 } 2503 } 2504 } 2505 2506 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2507 local_file && local_file != bs && 2508 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2509 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2510 int64_t file_pnum; 2511 int ret2; 2512 2513 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2514 *pnum, &file_pnum, NULL, NULL); 2515 if (ret2 >= 0) { 2516 /* Ignore errors. This is just providing extra information, it 2517 * is useful but not necessary. 2518 */ 2519 if (ret2 & BDRV_BLOCK_EOF && 2520 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2521 /* 2522 * It is valid for the format block driver to read 2523 * beyond the end of the underlying file's current 2524 * size; such areas read as zero. 
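                 * (One layout that can trigger this, as an illustration:
                 * a format driver may map guest clusters at host offsets
                 * past the protocol file's current EOF, and until the
                 * file grows, reads of that area are satisfied as
                 * zeroes.)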
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}

int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;

    assert(!include_base || base); /* Can't include NULL base */

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer
             * is short, any zeroes that we synthesize beyond EOF behave as
             * if they were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as the upper layer
             * may be larger.  We'll add BDRV_BLOCK_EOF if needed at the end
             * of the function, see below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status; stop searching.
             *
             * Drop BDRV_BLOCK_EOF, as it is not valid for the upper layer,
             * which may be larger.  We'll add BDRV_BLOCK_EOF if needed at
             * the end of the function, see below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, the [offset, offset + *pnum) region is unallocated on this
         * layer, so continue diving down the chain.
2639 */ 2640 assert(*pnum <= bytes); 2641 bytes = *pnum; 2642 } 2643 2644 if (offset + *pnum == eof) { 2645 ret |= BDRV_BLOCK_EOF; 2646 } 2647 2648 return ret; 2649 } 2650 2651 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2652 int64_t offset, int64_t bytes, int64_t *pnum, 2653 int64_t *map, BlockDriverState **file) 2654 { 2655 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, 2656 pnum, map, file, NULL); 2657 } 2658 2659 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2660 int64_t *pnum, int64_t *map, BlockDriverState **file) 2661 { 2662 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2663 offset, bytes, pnum, map, file); 2664 } 2665 2666 /* 2667 * Check @bs (and its backing chain) to see if the range defined 2668 * by @offset and @bytes is known to read as zeroes. 2669 * Return 1 if that is the case, 0 otherwise and -errno on error. 2670 * This test is meant to be fast rather than accurate so returning 0 2671 * does not guarantee non-zero data. 2672 */ 2673 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2674 int64_t bytes) 2675 { 2676 int ret; 2677 int64_t pnum = bytes; 2678 2679 if (!bytes) { 2680 return 1; 2681 } 2682 2683 ret = bdrv_common_block_status_above(bs, NULL, false, false, offset, 2684 bytes, &pnum, NULL, NULL, NULL); 2685 2686 if (ret < 0) { 2687 return ret; 2688 } 2689 2690 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2691 } 2692 2693 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, 2694 int64_t bytes, int64_t *pnum) 2695 { 2696 int ret; 2697 int64_t dummy; 2698 2699 ret = bdrv_common_block_status_above(bs, bs, true, false, offset, 2700 bytes, pnum ? pnum : &dummy, NULL, 2701 NULL, NULL); 2702 if (ret < 0) { 2703 return ret; 2704 } 2705 return !!(ret & BDRV_BLOCK_ALLOCATED); 2706 } 2707 2708 /* 2709 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2710 * 2711 * Return a positive depth if (a prefix of) the given range is allocated 2712 * in any image between BASE and TOP (BASE is only included if include_base 2713 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. 2714 * BASE can be NULL to check if the given offset is allocated in any 2715 * image of the chain. Return 0 otherwise, or negative errno on 2716 * failure. 2717 * 2718 * 'pnum' is set to the number of bytes (including and immediately 2719 * following the specified offset) that are known to be in the same 2720 * allocated/unallocated state. Note that a subsequent call starting 2721 * at 'offset + *pnum' may return the same allocation status (in other 2722 * words, the result is not necessarily the maximum possible range); 2723 * but 'pnum' will only be 0 when end of file is reached. 
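 *
 * A minimal usage sketch (illustrative only; 'top', 'base' and the byte
 * range are hypothetical):
 *
 *     int64_t pnum;
 *     int depth = bdrv_is_allocated_above(top, base, false, 0, 65536,
 *                                         &pnum);
 *     if (depth > 0) {
 *         // the first pnum bytes come from layer 'depth' (1 == top)
 *     } else if (depth == 0) {
 *         // no layer above 'base' allocates the first pnum bytes
 *     }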
2724 */ 2725 int bdrv_is_allocated_above(BlockDriverState *top, 2726 BlockDriverState *base, 2727 bool include_base, int64_t offset, 2728 int64_t bytes, int64_t *pnum) 2729 { 2730 int depth; 2731 int ret = bdrv_common_block_status_above(top, base, include_base, false, 2732 offset, bytes, pnum, NULL, NULL, 2733 &depth); 2734 if (ret < 0) { 2735 return ret; 2736 } 2737 2738 if (ret & BDRV_BLOCK_ALLOCATED) { 2739 return depth; 2740 } 2741 return 0; 2742 } 2743 2744 int coroutine_fn 2745 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2746 { 2747 BlockDriver *drv = bs->drv; 2748 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2749 int ret = -ENOTSUP; 2750 2751 if (!drv) { 2752 return -ENOMEDIUM; 2753 } 2754 2755 bdrv_inc_in_flight(bs); 2756 2757 if (drv->bdrv_load_vmstate) { 2758 ret = drv->bdrv_load_vmstate(bs, qiov, pos); 2759 } else if (child_bs) { 2760 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2761 } 2762 2763 bdrv_dec_in_flight(bs); 2764 2765 return ret; 2766 } 2767 2768 int coroutine_fn 2769 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2770 { 2771 BlockDriver *drv = bs->drv; 2772 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2773 int ret = -ENOTSUP; 2774 2775 if (!drv) { 2776 return -ENOMEDIUM; 2777 } 2778 2779 bdrv_inc_in_flight(bs); 2780 2781 if (drv->bdrv_save_vmstate) { 2782 ret = drv->bdrv_save_vmstate(bs, qiov, pos); 2783 } else if (child_bs) { 2784 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2785 } 2786 2787 bdrv_dec_in_flight(bs); 2788 2789 return ret; 2790 } 2791 2792 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2793 int64_t pos, int size) 2794 { 2795 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2796 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2797 2798 return ret < 0 ? ret : size; 2799 } 2800 2801 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2802 int64_t pos, int size) 2803 { 2804 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2805 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2806 2807 return ret < 0 ? ret : size; 2808 } 2809 2810 /**************************************************************/ 2811 /* async I/Os */ 2812 2813 void bdrv_aio_cancel(BlockAIOCB *acb) 2814 { 2815 qemu_aio_ref(acb); 2816 bdrv_aio_cancel_async(acb); 2817 while (acb->refcnt > 1) { 2818 if (acb->aiocb_info->get_aio_context) { 2819 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2820 } else if (acb->bs) { 2821 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2822 * assert that we're not using an I/O thread. Thread-safe 2823 * code should use bdrv_aio_cancel_async exclusively. 2824 */ 2825 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2826 aio_poll(bdrv_get_aio_context(acb->bs), true); 2827 } else { 2828 abort(); 2829 } 2830 } 2831 qemu_aio_unref(acb); 2832 } 2833 2834 /* Async version of aio cancel. The caller is not blocked if the acb implements 2835 * cancel_async, otherwise we do nothing and let the request normally complete. 2836 * In either case the completion callback must be called. 
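 *
 * A typical caller looks roughly like this (a sketch; polling and error
 * handling are elided):
 *
 *     bdrv_aio_cancel_async(acb);
 *     // keep running the AioContext: the request still completes and
 *     // its callback runs, possibly with a negative ret such as
 *     // -ECANCELED if the driver honoured the cancellation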
 */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in the case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush.  Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk.  Returning an error doesn't work
         * because that would break guests even if the server operates in
         * writethrough mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
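     * (For example, with a qcow2 image on a POSIX file: qcow2 has already
     * written back its own state above, and the loop below then flushes
     * the protocol child, which typically ends in an fdatasync() of the
     * underlying file.)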
2941 */ 2942 flush_children: 2943 ret = 0; 2944 QLIST_FOREACH(child, &bs->children, next) { 2945 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 2946 int this_child_ret = bdrv_co_flush(child->bs); 2947 if (!ret) { 2948 ret = this_child_ret; 2949 } 2950 } 2951 } 2952 2953 out: 2954 /* Notify any pending flushes that we have completed */ 2955 if (ret == 0) { 2956 bs->flushed_gen = current_gen; 2957 } 2958 2959 qemu_co_mutex_lock(&bs->reqs_lock); 2960 bs->active_flush_req = false; 2961 /* Return value is ignored - it's ok if wait queue is empty */ 2962 qemu_co_queue_next(&bs->flush_queue); 2963 qemu_co_mutex_unlock(&bs->reqs_lock); 2964 2965 early_exit: 2966 bdrv_dec_in_flight(bs); 2967 return ret; 2968 } 2969 2970 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 2971 int64_t bytes) 2972 { 2973 BdrvTrackedRequest req; 2974 int max_pdiscard, ret; 2975 int head, tail, align; 2976 BlockDriverState *bs = child->bs; 2977 2978 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) { 2979 return -ENOMEDIUM; 2980 } 2981 2982 if (bdrv_has_readonly_bitmaps(bs)) { 2983 return -EPERM; 2984 } 2985 2986 ret = bdrv_check_request(offset, bytes, NULL); 2987 if (ret < 0) { 2988 return ret; 2989 } 2990 2991 /* Do nothing if disabled. */ 2992 if (!(bs->open_flags & BDRV_O_UNMAP)) { 2993 return 0; 2994 } 2995 2996 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 2997 return 0; 2998 } 2999 3000 /* Discard is advisory, but some devices track and coalesce 3001 * unaligned requests, so we must pass everything down rather than 3002 * round here. Still, most devices will just silently ignore 3003 * unaligned requests (by returning -ENOTSUP), so we must fragment 3004 * the request accordingly. */ 3005 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 3006 assert(align % bs->bl.request_alignment == 0); 3007 head = offset % align; 3008 tail = (offset + bytes) % align; 3009 3010 bdrv_inc_in_flight(bs); 3011 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 3012 3013 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 3014 if (ret < 0) { 3015 goto out; 3016 } 3017 3018 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), 3019 align); 3020 assert(max_pdiscard >= bs->bl.request_alignment); 3021 3022 while (bytes > 0) { 3023 int64_t num = bytes; 3024 3025 if (head) { 3026 /* Make small requests to get to alignment boundaries. */ 3027 num = MIN(bytes, align - head); 3028 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 3029 num %= bs->bl.request_alignment; 3030 } 3031 head = (head + num) % align; 3032 assert(num < max_pdiscard); 3033 } else if (tail) { 3034 if (num > align) { 3035 /* Shorten the request to the last aligned cluster. 
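                 * For illustration (hypothetical numbers): with align =
                 * 1 MiB, a discard of [0, 1.5 MiB) has tail = 512 KiB;
                 * num is shortened to 1 MiB here and the remaining
                 * unaligned 512 KiB is passed down on the next loop
                 * iteration.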
*/ 3036 num -= tail; 3037 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 3038 tail > bs->bl.request_alignment) { 3039 tail %= bs->bl.request_alignment; 3040 num -= tail; 3041 } 3042 } 3043 /* limit request size */ 3044 if (num > max_pdiscard) { 3045 num = max_pdiscard; 3046 } 3047 3048 if (!bs->drv) { 3049 ret = -ENOMEDIUM; 3050 goto out; 3051 } 3052 if (bs->drv->bdrv_co_pdiscard) { 3053 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 3054 } else { 3055 BlockAIOCB *acb; 3056 CoroutineIOCompletion co = { 3057 .coroutine = qemu_coroutine_self(), 3058 }; 3059 3060 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 3061 bdrv_co_io_em_complete, &co); 3062 if (acb == NULL) { 3063 ret = -EIO; 3064 goto out; 3065 } else { 3066 qemu_coroutine_yield(); 3067 ret = co.ret; 3068 } 3069 } 3070 if (ret && ret != -ENOTSUP) { 3071 goto out; 3072 } 3073 3074 offset += num; 3075 bytes -= num; 3076 } 3077 ret = 0; 3078 out: 3079 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 3080 tracked_request_end(&req); 3081 bdrv_dec_in_flight(bs); 3082 return ret; 3083 } 3084 3085 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3086 { 3087 BlockDriver *drv = bs->drv; 3088 CoroutineIOCompletion co = { 3089 .coroutine = qemu_coroutine_self(), 3090 }; 3091 BlockAIOCB *acb; 3092 3093 bdrv_inc_in_flight(bs); 3094 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3095 co.ret = -ENOTSUP; 3096 goto out; 3097 } 3098 3099 if (drv->bdrv_co_ioctl) { 3100 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3101 } else { 3102 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3103 if (!acb) { 3104 co.ret = -ENOTSUP; 3105 goto out; 3106 } 3107 qemu_coroutine_yield(); 3108 } 3109 out: 3110 bdrv_dec_in_flight(bs); 3111 return co.ret; 3112 } 3113 3114 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3115 { 3116 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3117 } 3118 3119 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3120 { 3121 return memset(qemu_blockalign(bs, size), 0, size); 3122 } 3123 3124 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3125 { 3126 size_t align = bdrv_opt_mem_align(bs); 3127 3128 /* Ensure that NULL is never returned on success */ 3129 assert(align > 0); 3130 if (size == 0) { 3131 size = align; 3132 } 3133 3134 return qemu_try_memalign(align, size); 3135 } 3136 3137 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3138 { 3139 void *mem = qemu_try_blockalign(bs, size); 3140 3141 if (mem) { 3142 memset(mem, 0, size); 3143 } 3144 3145 return mem; 3146 } 3147 3148 /* 3149 * Check if all memory in this vector is sector aligned. 
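 * More precisely, "aligned" here means aligned to bs->bl.min_mem_alignment
 * (as returned by bdrv_min_mem_align()); both the base address and the
 * length of every iovec element are checked.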
3150 */ 3151 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 3152 { 3153 int i; 3154 size_t alignment = bdrv_min_mem_align(bs); 3155 3156 for (i = 0; i < qiov->niov; i++) { 3157 if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 3158 return false; 3159 } 3160 if (qiov->iov[i].iov_len % alignment) { 3161 return false; 3162 } 3163 } 3164 3165 return true; 3166 } 3167 3168 void bdrv_io_plug(BlockDriverState *bs) 3169 { 3170 BdrvChild *child; 3171 3172 QLIST_FOREACH(child, &bs->children, next) { 3173 bdrv_io_plug(child->bs); 3174 } 3175 3176 if (qatomic_fetch_inc(&bs->io_plugged) == 0) { 3177 BlockDriver *drv = bs->drv; 3178 if (drv && drv->bdrv_io_plug) { 3179 drv->bdrv_io_plug(bs); 3180 } 3181 } 3182 } 3183 3184 void bdrv_io_unplug(BlockDriverState *bs) 3185 { 3186 BdrvChild *child; 3187 3188 assert(bs->io_plugged); 3189 if (qatomic_fetch_dec(&bs->io_plugged) == 1) { 3190 BlockDriver *drv = bs->drv; 3191 if (drv && drv->bdrv_io_unplug) { 3192 drv->bdrv_io_unplug(bs); 3193 } 3194 } 3195 3196 QLIST_FOREACH(child, &bs->children, next) { 3197 bdrv_io_unplug(child->bs); 3198 } 3199 } 3200 3201 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size) 3202 { 3203 BdrvChild *child; 3204 3205 if (bs->drv && bs->drv->bdrv_register_buf) { 3206 bs->drv->bdrv_register_buf(bs, host, size); 3207 } 3208 QLIST_FOREACH(child, &bs->children, next) { 3209 bdrv_register_buf(child->bs, host, size); 3210 } 3211 } 3212 3213 void bdrv_unregister_buf(BlockDriverState *bs, void *host) 3214 { 3215 BdrvChild *child; 3216 3217 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3218 bs->drv->bdrv_unregister_buf(bs, host); 3219 } 3220 QLIST_FOREACH(child, &bs->children, next) { 3221 bdrv_unregister_buf(child->bs, host); 3222 } 3223 } 3224 3225 static int coroutine_fn bdrv_co_copy_range_internal( 3226 BdrvChild *src, int64_t src_offset, BdrvChild *dst, 3227 int64_t dst_offset, int64_t bytes, 3228 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3229 bool recurse_src) 3230 { 3231 BdrvTrackedRequest req; 3232 int ret; 3233 3234 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3235 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3236 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3237 3238 if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) { 3239 return -ENOMEDIUM; 3240 } 3241 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0); 3242 if (ret) { 3243 return ret; 3244 } 3245 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3246 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3247 } 3248 3249 if (!src || !src->bs || !bdrv_is_inserted(src->bs)) { 3250 return -ENOMEDIUM; 3251 } 3252 ret = bdrv_check_request32(src_offset, bytes, NULL, 0); 3253 if (ret) { 3254 return ret; 3255 } 3256 3257 if (!src->bs->drv->bdrv_co_copy_range_from 3258 || !dst->bs->drv->bdrv_co_copy_range_to 3259 || src->bs->encrypted || dst->bs->encrypted) { 3260 return -ENOTSUP; 3261 } 3262 3263 if (recurse_src) { 3264 bdrv_inc_in_flight(src->bs); 3265 tracked_request_begin(&req, src->bs, src_offset, bytes, 3266 BDRV_TRACKED_READ); 3267 3268 /* BDRV_REQ_SERIALISING is only for write operation */ 3269 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3270 bdrv_wait_serialising_requests(&req); 3271 3272 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3273 src, src_offset, 3274 dst, dst_offset, 3275 bytes, 3276 read_flags, write_flags); 3277 3278 tracked_request_end(&req); 3279 bdrv_dec_in_flight(src->bs); 3280 } else { 3281 bdrv_inc_in_flight(dst->bs); 3282 tracked_request_begin(&req, dst->bs, dst_offset, 
bytes, 3283 BDRV_TRACKED_WRITE); 3284 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3285 write_flags); 3286 if (!ret) { 3287 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3288 src, src_offset, 3289 dst, dst_offset, 3290 bytes, 3291 read_flags, write_flags); 3292 } 3293 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3294 tracked_request_end(&req); 3295 bdrv_dec_in_flight(dst->bs); 3296 } 3297 3298 return ret; 3299 } 3300 3301 /* Copy range from @src to @dst. 3302 * 3303 * See the comment of bdrv_co_copy_range for the parameter and return value 3304 * semantics. */ 3305 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, 3306 BdrvChild *dst, int64_t dst_offset, 3307 int64_t bytes, 3308 BdrvRequestFlags read_flags, 3309 BdrvRequestFlags write_flags) 3310 { 3311 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3312 read_flags, write_flags); 3313 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3314 bytes, read_flags, write_flags, true); 3315 } 3316 3317 /* Copy range from @src to @dst. 3318 * 3319 * See the comment of bdrv_co_copy_range for the parameter and return value 3320 * semantics. */ 3321 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, 3322 BdrvChild *dst, int64_t dst_offset, 3323 int64_t bytes, 3324 BdrvRequestFlags read_flags, 3325 BdrvRequestFlags write_flags) 3326 { 3327 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3328 read_flags, write_flags); 3329 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3330 bytes, read_flags, write_flags, false); 3331 } 3332 3333 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, 3334 BdrvChild *dst, int64_t dst_offset, 3335 int64_t bytes, BdrvRequestFlags read_flags, 3336 BdrvRequestFlags write_flags) 3337 { 3338 return bdrv_co_copy_range_from(src, src_offset, 3339 dst, dst_offset, 3340 bytes, read_flags, write_flags); 3341 } 3342 3343 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3344 { 3345 BdrvChild *c; 3346 QLIST_FOREACH(c, &bs->parents, next_parent) { 3347 if (c->klass->resize) { 3348 c->klass->resize(c); 3349 } 3350 } 3351 } 3352 3353 /** 3354 * Truncate file to 'offset' bytes (needed only for file protocols) 3355 * 3356 * If 'exact' is true, the file must be resized to exactly the given 3357 * 'offset'. Otherwise, it is sufficient for the node to be at least 3358 * 'offset' bytes in length. 
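 *
 * A minimal usage sketch (illustrative values; 'child' is a hypothetical
 * BdrvChild and the caller runs in coroutine context):
 *
 *     Error *local_err = NULL;
 *     int ret = bdrv_co_truncate(child, 2 * GiB, false,
 *                                PREALLOC_MODE_OFF, 0, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }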
3359 */ 3360 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3361 PreallocMode prealloc, BdrvRequestFlags flags, 3362 Error **errp) 3363 { 3364 BlockDriverState *bs = child->bs; 3365 BdrvChild *filtered, *backing; 3366 BlockDriver *drv = bs->drv; 3367 BdrvTrackedRequest req; 3368 int64_t old_size, new_bytes; 3369 int ret; 3370 3371 3372 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3373 if (!drv) { 3374 error_setg(errp, "No medium inserted"); 3375 return -ENOMEDIUM; 3376 } 3377 if (offset < 0) { 3378 error_setg(errp, "Image size cannot be negative"); 3379 return -EINVAL; 3380 } 3381 3382 ret = bdrv_check_request(offset, 0, errp); 3383 if (ret < 0) { 3384 return ret; 3385 } 3386 3387 old_size = bdrv_getlength(bs); 3388 if (old_size < 0) { 3389 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3390 return old_size; 3391 } 3392 3393 if (offset > old_size) { 3394 new_bytes = offset - old_size; 3395 } else { 3396 new_bytes = 0; 3397 } 3398 3399 bdrv_inc_in_flight(bs); 3400 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3401 BDRV_TRACKED_TRUNCATE); 3402 3403 /* If we are growing the image and potentially using preallocation for the 3404 * new area, we need to make sure that no write requests are made to it 3405 * concurrently or they might be overwritten by preallocation. */ 3406 if (new_bytes) { 3407 bdrv_make_request_serialising(&req, 1); 3408 } 3409 if (bdrv_is_read_only(bs)) { 3410 error_setg(errp, "Image is read-only"); 3411 ret = -EACCES; 3412 goto out; 3413 } 3414 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3415 0); 3416 if (ret < 0) { 3417 error_setg_errno(errp, -ret, 3418 "Failed to prepare request for truncation"); 3419 goto out; 3420 } 3421 3422 filtered = bdrv_filter_child(bs); 3423 backing = bdrv_cow_child(bs); 3424 3425 /* 3426 * If the image has a backing file that is large enough that it would 3427 * provide data for the new area, we cannot leave it unallocated because 3428 * then the backing file content would become visible. Instead, zero-fill 3429 * the new area. 3430 * 3431 * Note that if the image has a backing file, but was opened without the 3432 * backing file, taking care of keeping things consistent with that backing 3433 * file is the user's responsibility. 
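     * (Example: growing a 1 GiB overlay to 2 GiB while its backing file
     * is 1.5 GiB long.  Without the zero-fill chosen below, the backing
     * data in [1 GiB, 1.5 GiB) would suddenly become visible through the
     * overlay.)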
3434 */ 3435 if (new_bytes && backing) { 3436 int64_t backing_len; 3437 3438 backing_len = bdrv_getlength(backing->bs); 3439 if (backing_len < 0) { 3440 ret = backing_len; 3441 error_setg_errno(errp, -ret, "Could not get backing file size"); 3442 goto out; 3443 } 3444 3445 if (backing_len > old_size) { 3446 flags |= BDRV_REQ_ZERO_WRITE; 3447 } 3448 } 3449 3450 if (drv->bdrv_co_truncate) { 3451 if (flags & ~bs->supported_truncate_flags) { 3452 error_setg(errp, "Block driver does not support requested flags"); 3453 ret = -ENOTSUP; 3454 goto out; 3455 } 3456 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3457 } else if (filtered) { 3458 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3459 } else { 3460 error_setg(errp, "Image format driver does not support resize"); 3461 ret = -ENOTSUP; 3462 goto out; 3463 } 3464 if (ret < 0) { 3465 goto out; 3466 } 3467 3468 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3469 if (ret < 0) { 3470 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3471 } else { 3472 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3473 } 3474 /* It's possible that truncation succeeded but refresh_total_sectors 3475 * failed, but the latter doesn't affect how we should finish the request. 3476 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */ 3477 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3478 3479 out: 3480 tracked_request_end(&req); 3481 bdrv_dec_in_flight(bs); 3482 3483 return ret; 3484 } 3485 3486 void bdrv_cancel_in_flight(BlockDriverState *bs) 3487 { 3488 if (!bs || !bs->drv) { 3489 return; 3490 } 3491 3492 if (bs->drv->bdrv_cancel_in_flight) { 3493 bs->drv->bdrv_cancel_in_flight(bs); 3494 } 3495 } 3496
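
/*
 * Usage sketch for the copy-range entry points above (illustrative only;
 * 'src_child' and 'dst_child' are hypothetical BdrvChild pointers and the
 * caller runs in coroutine context):
 *
 *     int ret = bdrv_co_copy_range(src_child, 0, dst_child, 0, 1 * MiB,
 *                                  0, 0);
 *     // a caller would typically fall back to an ordinary bounce-buffer
 *     // read/write pair when this returns -ENOTSUP (e.g. for encrypted
 *     // nodes or drivers without copy offload)
 */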