/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, errp);
            if (*errp) {
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
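/*
 * Illustrative sketch (not part of the original code, hypothetical values):
 * how bdrv_merge_limits() above combines limits from two children.
 *
 *     BlockLimits a = { .max_transfer = 64 * 1024, .max_iov = 0 };
 *     BlockLimits b = { .max_transfer = 0, .max_iov = 128 };
 *     bdrv_merge_limits(&a, &b);
 *     // a.max_transfer == 64 * 1024  (MIN_NON_ZERO skips the zero value)
 *     // a.max_iov == 128             (MIN_NON_ZERO picks the non-zero value)
 *
 * opt_transfer and the memory alignments take the MAX instead, so the merged
 * node satisfies the strictest alignment requirement of all its children.
 */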
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent,
                               ignore_bds_parents, false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}
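/*
 * Usage sketch (an assumption, mirroring callers elsewhere in the tree, not
 * code from this file): a drained section brackets graph manipulation so
 * that no requests are in flight while the caller works on @bs.
 *
 *     bdrv_drained_begin(bs);     // quiesce parents, poll in-flight requests
 *     ... modify the graph or node state ...
 *     bdrv_drained_end(bs);       // resume; polls drained_end_counter
 *
 * bdrv_subtree_drained_begin()/bdrv_subtree_drained_end() do the same
 * recursively for all children of @bs.
 */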
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish may never terminate.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish may never terminate.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
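/*
 * Usage sketch (an assumption, not code from this file): the begin/end pair
 * above brackets global operations, such as stopping the VM, where every
 * node must be quiescent:
 *
 *     bdrv_drain_all_begin();
 *     ... no new block jobs or BDSes may be created in here ...
 *     bdrv_drain_all_end();
 *
 * bdrv_drain_all() above is exactly this pair with nothing in between.
 */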
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool waited = false;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
        waited = true;
    }

    return waited;
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
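/*
 * Worked example (hypothetical numbers) for the serialising logic above: a
 * request serialised at 64 KiB cluster granularity expands its overlap
 * window to cluster boundaries, so a neighbouring write into the same
 * cluster is considered conflicting:
 *
 *     req->offset = 0x11000, req->bytes = 0x1000, align = 0x10000
 *     overlap_offset = 0x11000 & ~0xffff                   = 0x10000
 *     overlap_bytes  = ROUND_UP(0x12000, 0x10000) - 0x10000 = 0x10000
 *
 * tracked_request_overlaps() then reports a conflict for any request
 * touching [0x10000, 0x20000).
 */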
/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    bool waited;

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    waited = bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);

    return waited;
}

int bdrv_check_request(int64_t offset, int64_t bytes)
{
    if (offset < 0 || bytes < 0) {
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request32(int64_t offset, int64_t bytes)
{
    int ret = bdrv_check_request(offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}
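/*
 * Usage sketch (an assumption, not code from this file): zero out a 1 MiB
 * region, allowing the driver to punch holes where it can:
 *
 *     int ret = bdrv_pwrite_zeroes(child, 0, 1024 * 1024,
 *                                  BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         // see bdrv_pwrite() below for the possible error codes
 *     }
 */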
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/* Return number of bytes on success or < 0 on error. Important errors are:
   -EIO         generic I/O error (may happen for all errors)
   -ENOMEDIUM   No media inserted.
   -EINVAL      Invalid offset or number of bytes
   -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}
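/*
 * Usage sketch (an assumption, not code from this file): the synchronous
 * byte-based helpers above wrap a plain buffer in a QEMUIOVector and return
 * the byte count on success:
 *
 *     uint8_t buf[512];
 *     int ret = bdrv_pread(child, 0, buf, sizeof(buf));
 *     if (ret == sizeof(buf)) {
 *         ret = bdrv_pwrite(child, 4096, buf, sizeof(buf));
 *     }
 */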
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
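/*
 * Note on the flag handling in bdrv_driver_pwritev() above (a sketch with a
 * hypothetical driver, not code from this file): any write flag the driver
 * does not advertise in bs->supported_write_flags is stripped before the
 * driver call and emulated afterwards. For a driver without native FUA
 * support:
 *
 *     flags = BDRV_REQ_FUA;                        // caller requested FUA
 *     // driver sees: flags & bs->supported_write_flags == 0
 *     // emulate_flags: ret == 0 && (flags & BDRV_REQ_FUA)
 *     //               -> bdrv_co_flush(bs)
 */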
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;
    bool skip_write;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating a cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining, 0);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf ... )                               [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to the sub-buffer, corresponding to the align-sized
 * chunk around the tail, if the tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    uint64_t align = bs->bl.request_alignment;
    size_t sum;

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ?
        2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}

static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
                                 bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        uint64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
}
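/*
 * Worked example (hypothetical numbers) for bdrv_init_padding() above, with
 * request_alignment = 512:
 *
 *     offset = 1000, bytes = 2000       -> end = 3000
 *     head = 1000 & 511                 = 488
 *     tail = 512 - (3000 & 511)         = 512 - 440 = 72
 *     sum  = 488 + 2000 + 72            = 2560 > 512, head && tail
 *     buf_len = 2 * 512 = 1024, merge_reads = false
 *
 * A request that fits into a single alignment chunk instead gets
 * buf_len == align and merge_reads == true, so head and tail share one read.
 */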
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * All parameters except @bs are in-out: they represent original request at
 * function call and padded (if padding needed) at function finish.
 *
 * Function always succeeds.
 */
static bool bdrv_pad_request(BlockDriverState *bs,
                             QEMUIOVector **qiov, size_t *qiov_offset,
                             int64_t *offset, unsigned int *bytes,
                             BdrvRequestPadding *pad)
{
    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        return false;
    }

    qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                             *qiov, *qiov_offset, *bytes,
                             pad->buf + pad->buf_len - pad->tail, pad->tail);
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;

    return true;
}

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv(bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver gives
         * special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass them to the driver
         * due to request_alignment.
         *
         * Still, no reason to return an error if someone does an occasional
         * unaligned zero-length read.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad);

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);

    return ret;
}
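/*
 * Usage sketch (an assumption, not code from this file): a read through the
 * public coroutine entry point above; padding and copy-on-read are handled
 * internally. The caller is assumed to already run in coroutine context.
 *
 *     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);
 *     ret = bdrv_co_preadv(child, offset, len, &qiov, 0);
 *
 * Callers reading into the middle of a larger iovec can use
 * bdrv_co_preadv_part() with a non-zero @qiov_offset instead.
 */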
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}

static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);

    if (bs->read_only) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
                                                req);
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can
     * extend past the end of the image file, so we cannot assert about
     * BDRV_TRACKED_DISCARD here. Instead, just skip it, since semantically a
     * discard request beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
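/*
 * Note on the flag semantics in bdrv_co_write_req_prepare() above (a sketch,
 * not code from this file): BDRV_REQ_SERIALISING marks the request
 * serialising at cluster granularity before waiting for conflicts; combined
 * with BDRV_REQ_NO_WAIT, the prepare step fails fast instead of blocking:
 *
 *     ret = bdrv_co_write_req_prepare(child, offset, bytes, req,
 *                                     BDRV_REQ_SERIALISING |
 *                                     BDRV_REQ_NO_WAIT);
 *     // ret == -EBUSY if an overlapping request is already in flight
 */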
1940 */ 1941 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, 1942 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1943 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) 1944 { 1945 BlockDriverState *bs = child->bs; 1946 BlockDriver *drv = bs->drv; 1947 int ret; 1948 1949 uint64_t bytes_remaining = bytes; 1950 int max_transfer; 1951 1952 if (!drv) { 1953 return -ENOMEDIUM; 1954 } 1955 1956 if (bdrv_has_readonly_bitmaps(bs)) { 1957 return -EPERM; 1958 } 1959 1960 assert(is_power_of_2(align)); 1961 assert((offset & (align - 1)) == 0); 1962 assert((bytes & (align - 1)) == 0); 1963 assert(!qiov || qiov_offset + bytes <= qiov->size); 1964 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1965 align); 1966 1967 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 1968 1969 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1970 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 1971 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { 1972 flags |= BDRV_REQ_ZERO_WRITE; 1973 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 1974 flags |= BDRV_REQ_MAY_UNMAP; 1975 } 1976 } 1977 1978 if (ret < 0) { 1979 /* Do nothing, write notifier decided to fail this request */ 1980 } else if (flags & BDRV_REQ_ZERO_WRITE) { 1981 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 1982 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 1983 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 1984 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, 1985 qiov, qiov_offset); 1986 } else if (bytes <= max_transfer) { 1987 bdrv_debug_event(bs, BLKDBG_PWRITEV); 1988 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); 1989 } else { 1990 bdrv_debug_event(bs, BLKDBG_PWRITEV); 1991 while (bytes_remaining) { 1992 int num = MIN(bytes_remaining, max_transfer); 1993 int local_flags = flags; 1994 1995 assert(num); 1996 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 1997 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1998 /* If FUA is going to be emulated by flush, we only 1999 * need to flush on the last iteration */ 2000 local_flags &= ~BDRV_REQ_FUA; 2001 } 2002 2003 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 2004 num, qiov, 2005 qiov_offset + bytes - bytes_remaining, 2006 local_flags); 2007 if (ret < 0) { 2008 break; 2009 } 2010 bytes_remaining -= num; 2011 } 2012 } 2013 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 2014 2015 if (ret >= 0) { 2016 ret = 0; 2017 } 2018 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 2019 2020 return ret; 2021 } 2022 2023 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, 2024 int64_t offset, 2025 unsigned int bytes, 2026 BdrvRequestFlags flags, 2027 BdrvTrackedRequest *req) 2028 { 2029 BlockDriverState *bs = child->bs; 2030 QEMUIOVector local_qiov; 2031 uint64_t align = bs->bl.request_alignment; 2032 int ret = 0; 2033 bool padding; 2034 BdrvRequestPadding pad; 2035 2036 padding = bdrv_init_padding(bs, offset, bytes, &pad); 2037 if (padding) { 2038 bdrv_make_request_serialising(req, align); 2039 2040 bdrv_padding_rmw_read(child, req, &pad, true); 2041 2042 if (pad.head || pad.merge_reads) { 2043 int64_t aligned_offset = offset & ~(align - 1); 2044 int64_t write_bytes = pad.merge_reads ? 
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    int ret = 0;
    bool padding;
    BdrvRequestPadding pad;

    padding = bdrv_init_padding(bs, offset, bytes, &pad);
    if (padding) {
        bdrv_make_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;

            qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
            ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
                                       align, &local_qiov, 0,
                                       flags & ~BDRV_REQ_ZERO_WRITE);
            if (ret < 0 || pad.merge_reads) {
                /* Error or all work is done */
                goto out;
            }
            offset += write_bytes - pad.head;
            bytes -= write_bytes - pad.head;
        }
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, 0, flags);
        if (ret < 0) {
            goto out;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == pad.tail + bytes);

        qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, 0,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
    }

out:
    bdrv_padding_destroy(&pad);

    return ret;
}
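
/*
 * A worked example for the head/middle/tail split above (numbers are
 * illustrative only): with request_alignment = 4096, a zero write at
 * offset = 5120 for bytes = 10240 is carried out as
 *
 *     head:   RMW write of 4096 bytes at 4096   (1024 head bytes kept)
 *     middle: zero write of 4096 bytes at 8192  (the aligned part)
 *     tail:   RMW write of 4096 bytes at 12288  (1024 tail bytes kept)
 *
 * The head and tail pieces lose BDRV_REQ_ZERO_WRITE because they carry the
 * surrounding data read back by bdrv_padding_rmw_read().
 */
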
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Padding a zero-length request makes no sense. Even if the driver
         * assigns special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we cannot pass such a request
         * down to the driver because of request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length write occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align the write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of whether they are reachable via a
 * BlockBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    /*
     * The bdrv queue is managed by record/replay; creating a new flush
     * request for stopping the VM may break determinism.
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
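
/*
 * Typical use of the zero-write helper above, from coroutine context (a
 * minimal sketch; cluster_offset and cluster_size are placeholder names,
 * and error handling beyond the return value is omitted):
 *
 *     // Zero out one cluster, allowing the driver to unmap it if the
 *     // image was opened with BDRV_O_UNMAP:
 *     ret = bdrv_co_pwrite_zeroes(child, cluster_offset, cluster_size,
 *                                 BDRV_REQ_MAY_UNMAP);
 *
 * The flag is dropped internally when BDRV_O_UNMAP is not set on the node.
 */
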
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                            aligned_bytes, pnum, &local_map,
                                            &local_file);
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }
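
    /*
     * Worked example for the clamping below (illustrative numbers): with
     * align = 512, a query at offset = 700 for bytes = 100 is widened to
     * aligned_offset = 512, aligned_bytes = 512.  If the driver reports
     * *pnum = 512, the adjustment yields *pnum = 512 - (700 - 512) = 324,
     * clamped to the requested 100, and local_map is advanced by 188 so it
     * points at the host bytes for guest offset 700 again.
     */
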
    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
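
/*
 * Sketch of how the status bits computed above are typically consumed,
 * via the public wrappers defined below (illustrative only; the offsets
 * are placeholders):
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
 *
 *     if (ret < 0) {
 *         // query failed
 *     } else if (ret & BDRV_BLOCK_ZERO) {
 *         // the first pnum bytes read as zeroes
 *     } else if (ret & BDRV_BLOCK_OFFSET_VALID) {
 *         // data for [offset, offset + pnum) lives at 'map' in 'file'
 *     }
 *
 * Callers loop, advancing offset by pnum, until the range is covered.
 */
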
int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;

    assert(!include_base || base); /* Can't include a NULL base */

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if
             * they were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as the upper layer
             * may be larger.  We'll add BDRV_BLOCK_EOF if needed at the end
             * of the function, see below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status, we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which
             * may be larger.  We'll add BDRV_BLOCK_EOF if needed at the end
             * of the function, see below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, the region [offset, offset + *pnum) is unallocated on this
         * layer, so continue diving down the backing chain.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}

int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
                                          pnum, map, file, NULL);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
                                   offset, bytes, pnum, map, file);
}

/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise, and -errno on error.
 * This test is meant to be fast rather than accurate, so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;

    if (!bytes) {
        return 1;
    }

    ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
                                         bytes, &pnum, NULL, NULL, NULL);

    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
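
/*
 * Sketch of walking an image with the allocation helpers above (a common
 * pattern in block jobs; 'bs' and 'length' are placeholders):
 *
 *     int64_t offset = 0, pnum;
 *     while (offset < length) {
 *         int ret = bdrv_is_allocated(bs, offset, length - offset, &pnum);
 *         if (ret < 0) {
 *             break;          // query failed
 *         }
 *         if (ret) {
 *             // [offset, offset + pnum) is allocated in this layer
 *         }
 *         offset += pnum;
 *     }
 */
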
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret = bdrv_common_block_status_above(top, base, include_base, false,
                                             offset, bytes, pnum, NULL, NULL,
                                             &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret = -ENOTSUP;

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_load_vmstate) {
        ret = drv->bdrv_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret = -ENOTSUP;

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_save_vmstate) {
        ret = drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);

    return ret < 0 ? ret : size;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);

    return ret < 0 ? ret : size;
}
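
/*
 * Sketch of the buffer-based vmstate wrappers above in use (illustrative;
 * 'bs', 'buf' and 'pos' stand in for whatever the migration code actually
 * passes):
 *
 *     uint8_t buf[512];
 *     int ret = bdrv_save_vmstate(bs, buf, pos, sizeof(buf));
 *     if (ret < 0) {
 *         // write failed
 *     } else {
 *         pos += ret;   // on success the full 'size' is reported written
 *     }
 */
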
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel.  The caller is not blocked if the acb
 * implements cancel_async; otherwise we do nothing and let the request
 * complete normally.
 * In either case the completion callback must be called.
 */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush.  Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk.  Returning an error doesn't work
         * because that would break guests even if the server operates in
         * writethrough mode.
         *
         * Let's hope the user knows what they are doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }
    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;
    BlockDriverState *bs = child->bs;

    if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * rounding here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);
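
    /*
     * Worked example for the loop below (numbers are illustrative, and
     * assume request_alignment = 4 KiB and a large max_pdiscard): with
     * align = 64 KiB, a discard at offset = 60 KiB for bytes = 200 KiB has
     * head = 60 KiB and tail = 4 KiB, so the request is issued as
     *
     *     4 KiB at 60 KiB    (head fragment, up to the alignment boundary)
     *     192 KiB at 64 KiB  (aligned middle, shortened so the tail stays
     *                         separate)
     *     4 KiB at 256 KiB   (tail fragment)
     */
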
    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
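
/*
 * Typical bounce-buffer pattern built on the helpers above (a sketch; 'len'
 * is a placeholder).  qemu_vfree() pairs with all of these allocators:
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;   // allocation may fail, unlike qemu_blockalign()
 *     }
 *     ...
 *     qemu_vfree(buf);
 */
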
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_register_buf) {
        bs->drv->bdrv_register_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_register_buf(child->bs, host, size);
    }
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host);
    }
}
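
/*
 * Plug/unplug are used to batch submissions around a burst of requests (a
 * sketch; the queued requests stand in for whatever the caller issues):
 *
 *     bdrv_io_plug(bs);
 *     ...queue several requests...
 *     bdrv_io_unplug(bs);   // drivers may flush their queues here
 *
 * The counter in bs->io_plugged makes nested plug/unplug pairs safe; only
 * the outermost pair reaches the driver callbacks.
 */
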
static int coroutine_fn bdrv_co_copy_range_internal(
        BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
        uint64_t dst_offset, uint64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));

    if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operations */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         uint64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       uint64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}

static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}
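
/*
 * Offload-copy sketch using the entry point above (illustrative; both
 * children must sit on drivers that implement the copy_range callbacks,
 * otherwise the call fails with -ENOTSUP):
 *
 *     ret = bdrv_co_copy_range(src_child, src_offset,
 *                              dst_child, dst_offset, bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         // fall back to an ordinary read+write pair
 *     }
 */
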
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0);
    if (ret < 0) {
        error_setg(errp, "Requested image size is too large; it must not "
                   "be greater than %" PRId64, BDRV_MAX_LENGTH);
        return ret;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    if (bs->read_only) {
        error_setg(errp, "Image is read-only");
        ret = -EACCES;
        goto out;
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);
    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /* It's possible that truncation succeeded but refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
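
/*
 * Growing an image with the truncate entry point above (a sketch; 'new_size'
 * and the error handling are illustrative):
 *
 *     Error *local_err = NULL;
 *     ret = bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF, 0,
 *                            &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 *
 * Passing 'exact' = false lets protocol drivers round the size up, as
 * described in the comment above the function.
 */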