1 /* 2 * Block layer I/O functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "trace.h" 27 #include "sysemu/block-backend.h" 28 #include "block/aio-wait.h" 29 #include "block/blockjob.h" 30 #include "block/blockjob_int.h" 31 #include "block/block_int.h" 32 #include "qemu/cutils.h" 33 #include "qapi/error.h" 34 #include "qemu/error-report.h" 35 #include "qemu/main-loop.h" 36 #include "sysemu/replay.h" 37 38 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 39 40 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */ 41 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) 42 43 static void bdrv_parent_cb_resize(BlockDriverState *bs); 44 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 45 int64_t offset, int bytes, BdrvRequestFlags flags); 46 47 static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore, 48 bool ignore_bds_parents) 49 { 50 BdrvChild *c, *next; 51 52 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 53 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { 54 continue; 55 } 56 bdrv_parent_drained_begin_single(c, false); 57 } 58 } 59 60 static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c, 61 int *drained_end_counter) 62 { 63 assert(c->parent_quiesce_counter > 0); 64 c->parent_quiesce_counter--; 65 if (c->role->drained_end) { 66 c->role->drained_end(c, drained_end_counter); 67 } 68 } 69 70 void bdrv_parent_drained_end_single(BdrvChild *c) 71 { 72 int drained_end_counter = 0; 73 bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter); 74 BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0); 75 } 76 77 static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, 78 bool ignore_bds_parents, 79 int *drained_end_counter) 80 { 81 BdrvChild *c; 82 83 QLIST_FOREACH(c, &bs->parents, next_parent) { 84 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { 85 continue; 86 } 87 bdrv_parent_drained_end_single_no_poll(c, drained_end_counter); 88 } 89 } 90 91 static bool bdrv_parent_drained_poll_single(BdrvChild *c) 92 { 93 if (c->role->drained_poll) { 94 return c->role->drained_poll(c); 95 } 96 return false; 97 } 98 99 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, 100 bool ignore_bds_parents) 101 { 102 BdrvChild *c, *next; 103 bool busy = false; 104 105 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 106 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { 107 continue; 108 } 109 busy |= bdrv_parent_drained_poll_single(c); 110 } 111 112 return busy; 113 } 114 115 void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll) 116 { 117 c->parent_quiesce_counter++; 118 if (c->role->drained_begin) { 119 c->role->drained_begin(c); 120 } 121 if (poll) { 122 BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c)); 123 } 124 } 125 126 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) 127 { 128 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer); 129 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer); 130 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment, 131 src->opt_mem_alignment); 132 dst->min_mem_alignment = MAX(dst->min_mem_alignment, 133 src->min_mem_alignment); 134 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); 135 } 136 137 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) 138 { 139 BlockDriver *drv = bs->drv; 140 Error *local_err = NULL; 141 142 memset(&bs->bl, 0, sizeof(bs->bl)); 143 144 if (!drv) { 145 return; 146 } 147 148 /* Default alignment based on whether driver has byte interface */ 149 bs->bl.request_alignment = (drv->bdrv_co_preadv || 150 drv->bdrv_aio_preadv || 151 drv->bdrv_co_preadv_part) ? 1 : 512; 152 153 /* Take some limits from the children as a default */ 154 if (bs->file) { 155 bdrv_refresh_limits(bs->file->bs, &local_err); 156 if (local_err) { 157 error_propagate(errp, local_err); 158 return; 159 } 160 bdrv_merge_limits(&bs->bl, &bs->file->bs->bl); 161 } else { 162 bs->bl.min_mem_alignment = 512; 163 bs->bl.opt_mem_alignment = getpagesize(); 164 165 /* Safe default since most protocols use readv()/writev()/etc */ 166 bs->bl.max_iov = IOV_MAX; 167 } 168 169 if (bs->backing) { 170 bdrv_refresh_limits(bs->backing->bs, &local_err); 171 if (local_err) { 172 error_propagate(errp, local_err); 173 return; 174 } 175 bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl); 176 } 177 178 /* Then let the driver override it */ 179 if (drv->bdrv_refresh_limits) { 180 drv->bdrv_refresh_limits(bs, errp); 181 } 182 } 183 184 /** 185 * The copy-on-read flag is actually a reference count so multiple users may 186 * use the feature without worrying about clobbering its previous state. 187 * Copy-on-read stays enabled until all users have called to disable it. 188 */ 189 void bdrv_enable_copy_on_read(BlockDriverState *bs) 190 { 191 atomic_inc(&bs->copy_on_read); 192 } 193 194 void bdrv_disable_copy_on_read(BlockDriverState *bs) 195 { 196 int old = atomic_fetch_dec(&bs->copy_on_read); 197 assert(old >= 1); 198 } 199 200 typedef struct { 201 Coroutine *co; 202 BlockDriverState *bs; 203 bool done; 204 bool begin; 205 bool recursive; 206 bool poll; 207 BdrvChild *parent; 208 bool ignore_bds_parents; 209 int *drained_end_counter; 210 } BdrvCoDrainData; 211 212 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque) 213 { 214 BdrvCoDrainData *data = opaque; 215 BlockDriverState *bs = data->bs; 216 217 if (data->begin) { 218 bs->drv->bdrv_co_drain_begin(bs); 219 } else { 220 bs->drv->bdrv_co_drain_end(bs); 221 } 222 223 /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */ 224 atomic_mb_set(&data->done, true); 225 if (!data->begin) { 226 atomic_dec(data->drained_end_counter); 227 } 228 bdrv_dec_in_flight(bs); 229 230 g_free(data); 231 } 232 233 /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */ 234 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, 235 int *drained_end_counter) 236 { 237 BdrvCoDrainData *data; 238 239 if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) || 240 (!begin && !bs->drv->bdrv_co_drain_end)) { 241 return; 242 } 243 244 data = g_new(BdrvCoDrainData, 1); 245 *data = (BdrvCoDrainData) { 246 .bs = bs, 247 .done = false, 248 .begin = begin, 249 .drained_end_counter = drained_end_counter, 250 }; 251 252 if (!begin) { 253 atomic_inc(drained_end_counter); 254 } 255 256 /* Make sure the driver callback completes during the polling phase for 257 * drain_begin. */ 258 bdrv_inc_in_flight(bs); 259 data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data); 260 aio_co_schedule(bdrv_get_aio_context(bs), data->co); 261 } 262 263 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ 264 bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, 265 BdrvChild *ignore_parent, bool ignore_bds_parents) 266 { 267 BdrvChild *child, *next; 268 269 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { 270 return true; 271 } 272 273 if (atomic_read(&bs->in_flight)) { 274 return true; 275 } 276 277 if (recursive) { 278 assert(!ignore_bds_parents); 279 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 280 if (bdrv_drain_poll(child->bs, recursive, child, false)) { 281 return true; 282 } 283 } 284 } 285 286 return false; 287 } 288 289 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive, 290 BdrvChild *ignore_parent) 291 { 292 return bdrv_drain_poll(bs, recursive, ignore_parent, false); 293 } 294 295 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, 296 BdrvChild *parent, bool ignore_bds_parents, 297 bool poll); 298 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, 299 BdrvChild *parent, bool ignore_bds_parents, 300 int *drained_end_counter); 301 302 static void bdrv_co_drain_bh_cb(void *opaque) 303 { 304 BdrvCoDrainData *data = opaque; 305 Coroutine *co = data->co; 306 BlockDriverState *bs = data->bs; 307 308 if (bs) { 309 AioContext *ctx = bdrv_get_aio_context(bs); 310 AioContext *co_ctx = qemu_coroutine_get_aio_context(co); 311 312 /* 313 * When the coroutine yielded, the lock for its home context was 314 * released, so we need to re-acquire it here. If it explicitly 315 * acquired a different context, the lock is still held and we don't 316 * want to lock it a second time (or AIO_WAIT_WHILE() would hang). 317 */ 318 if (ctx == co_ctx) { 319 aio_context_acquire(ctx); 320 } 321 bdrv_dec_in_flight(bs); 322 if (data->begin) { 323 assert(!data->drained_end_counter); 324 bdrv_do_drained_begin(bs, data->recursive, data->parent, 325 data->ignore_bds_parents, data->poll); 326 } else { 327 assert(!data->poll); 328 bdrv_do_drained_end(bs, data->recursive, data->parent, 329 data->ignore_bds_parents, 330 data->drained_end_counter); 331 } 332 if (ctx == co_ctx) { 333 aio_context_release(ctx); 334 } 335 } else { 336 assert(data->begin); 337 bdrv_drain_all_begin(); 338 } 339 340 data->done = true; 341 aio_co_wake(co); 342 } 343 344 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, 345 bool begin, bool recursive, 346 BdrvChild *parent, 347 bool ignore_bds_parents, 348 bool poll, 349 int *drained_end_counter) 350 { 351 BdrvCoDrainData data; 352 353 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and 354 * other coroutines run if they were queued by aio_co_enter(). */ 355 356 assert(qemu_in_coroutine()); 357 data = (BdrvCoDrainData) { 358 .co = qemu_coroutine_self(), 359 .bs = bs, 360 .done = false, 361 .begin = begin, 362 .recursive = recursive, 363 .parent = parent, 364 .ignore_bds_parents = ignore_bds_parents, 365 .poll = poll, 366 .drained_end_counter = drained_end_counter, 367 }; 368 369 if (bs) { 370 bdrv_inc_in_flight(bs); 371 } 372 replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs), 373 bdrv_co_drain_bh_cb, &data); 374 375 qemu_coroutine_yield(); 376 /* If we are resumed from some other event (such as an aio completion or a 377 * timer callback), it is a bug in the caller that should be fixed. */ 378 assert(data.done); 379 } 380 381 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, 382 BdrvChild *parent, bool ignore_bds_parents) 383 { 384 assert(!qemu_in_coroutine()); 385 386 /* Stop things in parent-to-child order */ 387 if (atomic_fetch_inc(&bs->quiesce_counter) == 0) { 388 aio_disable_external(bdrv_get_aio_context(bs)); 389 } 390 391 bdrv_parent_drained_begin(bs, parent, ignore_bds_parents); 392 bdrv_drain_invoke(bs, true, NULL); 393 } 394 395 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, 396 BdrvChild *parent, bool ignore_bds_parents, 397 bool poll) 398 { 399 BdrvChild *child, *next; 400 401 if (qemu_in_coroutine()) { 402 bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents, 403 poll, NULL); 404 return; 405 } 406 407 bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents); 408 409 if (recursive) { 410 assert(!ignore_bds_parents); 411 bs->recursive_quiesce_counter++; 412 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 413 bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents, 414 false); 415 } 416 } 417 418 /* 419 * Wait for drained requests to finish. 420 * 421 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The 422 * call is needed so things in this AioContext can make progress even 423 * though we don't return to the main AioContext loop - this automatically 424 * includes other nodes in the same AioContext and therefore all child 425 * nodes. 426 */ 427 if (poll) { 428 assert(!ignore_bds_parents); 429 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent)); 430 } 431 } 432 433 void bdrv_drained_begin(BlockDriverState *bs) 434 { 435 bdrv_do_drained_begin(bs, false, NULL, false, true); 436 } 437 438 void bdrv_subtree_drained_begin(BlockDriverState *bs) 439 { 440 bdrv_do_drained_begin(bs, true, NULL, false, true); 441 } 442 443 /** 444 * This function does not poll, nor must any of its recursively called 445 * functions. The *drained_end_counter pointee will be incremented 446 * once for every background operation scheduled, and decremented once 447 * the operation settles. Therefore, the pointer must remain valid 448 * until the pointee reaches 0. That implies that whoever sets up the 449 * pointee has to poll until it is 0. 450 * 451 * We use atomic operations to access *drained_end_counter, because 452 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of 453 * @bs may contain nodes in different AioContexts, 454 * (2) bdrv_drain_all_end() uses the same counter for all nodes, 455 * regardless of which AioContext they are in. 456 */ 457 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, 458 BdrvChild *parent, bool ignore_bds_parents, 459 int *drained_end_counter) 460 { 461 BdrvChild *child; 462 int old_quiesce_counter; 463 464 assert(drained_end_counter != NULL); 465 466 if (qemu_in_coroutine()) { 467 bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents, 468 false, drained_end_counter); 469 return; 470 } 471 assert(bs->quiesce_counter > 0); 472 473 /* Re-enable things in child-to-parent order */ 474 bdrv_drain_invoke(bs, false, drained_end_counter); 475 bdrv_parent_drained_end(bs, parent, ignore_bds_parents, 476 drained_end_counter); 477 478 old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter); 479 if (old_quiesce_counter == 1) { 480 aio_enable_external(bdrv_get_aio_context(bs)); 481 } 482 483 if (recursive) { 484 assert(!ignore_bds_parents); 485 bs->recursive_quiesce_counter--; 486 QLIST_FOREACH(child, &bs->children, next) { 487 bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents, 488 drained_end_counter); 489 } 490 } 491 } 492 493 void bdrv_drained_end(BlockDriverState *bs) 494 { 495 int drained_end_counter = 0; 496 bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter); 497 BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0); 498 } 499 500 void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter) 501 { 502 bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter); 503 } 504 505 void bdrv_subtree_drained_end(BlockDriverState *bs) 506 { 507 int drained_end_counter = 0; 508 bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter); 509 BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0); 510 } 511 512 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) 513 { 514 int i; 515 516 for (i = 0; i < new_parent->recursive_quiesce_counter; i++) { 517 bdrv_do_drained_begin(child->bs, true, child, false, true); 518 } 519 } 520 521 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) 522 { 523 int drained_end_counter = 0; 524 int i; 525 526 for (i = 0; i < old_parent->recursive_quiesce_counter; i++) { 527 bdrv_do_drained_end(child->bs, true, child, false, 528 &drained_end_counter); 529 } 530 531 BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0); 532 } 533 534 /* 535 * Wait for pending requests to complete on a single BlockDriverState subtree, 536 * and suspend block driver's internal I/O until next request arrives. 537 * 538 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState 539 * AioContext. 540 */ 541 void coroutine_fn bdrv_co_drain(BlockDriverState *bs) 542 { 543 assert(qemu_in_coroutine()); 544 bdrv_drained_begin(bs); 545 bdrv_drained_end(bs); 546 } 547 548 void bdrv_drain(BlockDriverState *bs) 549 { 550 bdrv_drained_begin(bs); 551 bdrv_drained_end(bs); 552 } 553 554 static void bdrv_drain_assert_idle(BlockDriverState *bs) 555 { 556 BdrvChild *child, *next; 557 558 assert(atomic_read(&bs->in_flight) == 0); 559 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 560 bdrv_drain_assert_idle(child->bs); 561 } 562 } 563 564 unsigned int bdrv_drain_all_count = 0; 565 566 static bool bdrv_drain_all_poll(void) 567 { 568 BlockDriverState *bs = NULL; 569 bool result = false; 570 571 /* bdrv_drain_poll() can't make changes to the graph and we are holding the 572 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ 573 while ((bs = bdrv_next_all_states(bs))) { 574 AioContext *aio_context = bdrv_get_aio_context(bs); 575 aio_context_acquire(aio_context); 576 result |= bdrv_drain_poll(bs, false, NULL, true); 577 aio_context_release(aio_context); 578 } 579 580 return result; 581 } 582 583 /* 584 * Wait for pending requests to complete across all BlockDriverStates 585 * 586 * This function does not flush data to disk, use bdrv_flush_all() for that 587 * after calling this function. 588 * 589 * This pauses all block jobs and disables external clients. It must 590 * be paired with bdrv_drain_all_end(). 591 * 592 * NOTE: no new block jobs or BlockDriverStates can be created between 593 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls. 594 */ 595 void bdrv_drain_all_begin(void) 596 { 597 BlockDriverState *bs = NULL; 598 599 if (qemu_in_coroutine()) { 600 bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL); 601 return; 602 } 603 604 /* 605 * bdrv queue is managed by record/replay, 606 * waiting for finishing the I/O requests may 607 * be infinite 608 */ 609 if (replay_events_enabled()) { 610 return; 611 } 612 613 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main 614 * loop AioContext, so make sure we're in the main context. */ 615 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 616 assert(bdrv_drain_all_count < INT_MAX); 617 bdrv_drain_all_count++; 618 619 /* Quiesce all nodes, without polling in-flight requests yet. The graph 620 * cannot change during this loop. */ 621 while ((bs = bdrv_next_all_states(bs))) { 622 AioContext *aio_context = bdrv_get_aio_context(bs); 623 624 aio_context_acquire(aio_context); 625 bdrv_do_drained_begin(bs, false, NULL, true, false); 626 aio_context_release(aio_context); 627 } 628 629 /* Now poll the in-flight requests */ 630 AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll()); 631 632 while ((bs = bdrv_next_all_states(bs))) { 633 bdrv_drain_assert_idle(bs); 634 } 635 } 636 637 void bdrv_drain_all_end(void) 638 { 639 BlockDriverState *bs = NULL; 640 int drained_end_counter = 0; 641 642 /* 643 * bdrv queue is managed by record/replay, 644 * waiting for finishing the I/O requests may 645 * be endless 646 */ 647 if (replay_events_enabled()) { 648 return; 649 } 650 651 while ((bs = bdrv_next_all_states(bs))) { 652 AioContext *aio_context = bdrv_get_aio_context(bs); 653 654 aio_context_acquire(aio_context); 655 bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter); 656 aio_context_release(aio_context); 657 } 658 659 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 660 AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0); 661 662 assert(bdrv_drain_all_count > 0); 663 bdrv_drain_all_count--; 664 } 665 666 void bdrv_drain_all(void) 667 { 668 bdrv_drain_all_begin(); 669 bdrv_drain_all_end(); 670 } 671 672 /** 673 * Remove an active request from the tracked requests list 674 * 675 * This function should be called when a tracked request is completing. 676 */ 677 static void tracked_request_end(BdrvTrackedRequest *req) 678 { 679 if (req->serialising) { 680 atomic_dec(&req->bs->serialising_in_flight); 681 } 682 683 qemu_co_mutex_lock(&req->bs->reqs_lock); 684 QLIST_REMOVE(req, list); 685 qemu_co_queue_restart_all(&req->wait_queue); 686 qemu_co_mutex_unlock(&req->bs->reqs_lock); 687 } 688 689 /** 690 * Add an active request to the tracked requests list 691 */ 692 static void tracked_request_begin(BdrvTrackedRequest *req, 693 BlockDriverState *bs, 694 int64_t offset, 695 uint64_t bytes, 696 enum BdrvTrackedRequestType type) 697 { 698 assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes); 699 700 *req = (BdrvTrackedRequest){ 701 .bs = bs, 702 .offset = offset, 703 .bytes = bytes, 704 .type = type, 705 .co = qemu_coroutine_self(), 706 .serialising = false, 707 .overlap_offset = offset, 708 .overlap_bytes = bytes, 709 }; 710 711 qemu_co_queue_init(&req->wait_queue); 712 713 qemu_co_mutex_lock(&bs->reqs_lock); 714 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 715 qemu_co_mutex_unlock(&bs->reqs_lock); 716 } 717 718 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) 719 { 720 int64_t overlap_offset = req->offset & ~(align - 1); 721 uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) 722 - overlap_offset; 723 724 if (!req->serialising) { 725 atomic_inc(&req->bs->serialising_in_flight); 726 req->serialising = true; 727 } 728 729 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 730 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 731 } 732 733 static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req) 734 { 735 /* 736 * If the request is serialising, overlap_offset and overlap_bytes are set, 737 * so we can check if the request is aligned. Otherwise, don't care and 738 * return false. 739 */ 740 741 return req->serialising && (req->offset == req->overlap_offset) && 742 (req->bytes == req->overlap_bytes); 743 } 744 745 /** 746 * Round a region to cluster boundaries 747 */ 748 void bdrv_round_to_clusters(BlockDriverState *bs, 749 int64_t offset, int64_t bytes, 750 int64_t *cluster_offset, 751 int64_t *cluster_bytes) 752 { 753 BlockDriverInfo bdi; 754 755 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 756 *cluster_offset = offset; 757 *cluster_bytes = bytes; 758 } else { 759 int64_t c = bdi.cluster_size; 760 *cluster_offset = QEMU_ALIGN_DOWN(offset, c); 761 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); 762 } 763 } 764 765 static int bdrv_get_cluster_size(BlockDriverState *bs) 766 { 767 BlockDriverInfo bdi; 768 int ret; 769 770 ret = bdrv_get_info(bs, &bdi); 771 if (ret < 0 || bdi.cluster_size == 0) { 772 return bs->bl.request_alignment; 773 } else { 774 return bdi.cluster_size; 775 } 776 } 777 778 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 779 int64_t offset, uint64_t bytes) 780 { 781 /* aaaa bbbb */ 782 if (offset >= req->overlap_offset + req->overlap_bytes) { 783 return false; 784 } 785 /* bbbb aaaa */ 786 if (req->overlap_offset >= offset + bytes) { 787 return false; 788 } 789 return true; 790 } 791 792 void bdrv_inc_in_flight(BlockDriverState *bs) 793 { 794 atomic_inc(&bs->in_flight); 795 } 796 797 void bdrv_wakeup(BlockDriverState *bs) 798 { 799 aio_wait_kick(); 800 } 801 802 void bdrv_dec_in_flight(BlockDriverState *bs) 803 { 804 atomic_dec(&bs->in_flight); 805 bdrv_wakeup(bs); 806 } 807 808 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) 809 { 810 BlockDriverState *bs = self->bs; 811 BdrvTrackedRequest *req; 812 bool retry; 813 bool waited = false; 814 815 if (!atomic_read(&bs->serialising_in_flight)) { 816 return false; 817 } 818 819 do { 820 retry = false; 821 qemu_co_mutex_lock(&bs->reqs_lock); 822 QLIST_FOREACH(req, &bs->tracked_requests, list) { 823 if (req == self || (!req->serialising && !self->serialising)) { 824 continue; 825 } 826 if (tracked_request_overlaps(req, self->overlap_offset, 827 self->overlap_bytes)) 828 { 829 /* Hitting this means there was a reentrant request, for 830 * example, a block driver issuing nested requests. This must 831 * never happen since it means deadlock. 832 */ 833 assert(qemu_coroutine_self() != req->co); 834 835 /* If the request is already (indirectly) waiting for us, or 836 * will wait for us as soon as it wakes up, then just go on 837 * (instead of producing a deadlock in the former case). */ 838 if (!req->waiting_for) { 839 self->waiting_for = req; 840 qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock); 841 self->waiting_for = NULL; 842 retry = true; 843 waited = true; 844 break; 845 } 846 } 847 } 848 qemu_co_mutex_unlock(&bs->reqs_lock); 849 } while (retry); 850 851 return waited; 852 } 853 854 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, 855 size_t size) 856 { 857 if (size > BDRV_REQUEST_MAX_BYTES) { 858 return -EIO; 859 } 860 861 if (!bdrv_is_inserted(bs)) { 862 return -ENOMEDIUM; 863 } 864 865 if (offset < 0) { 866 return -EIO; 867 } 868 869 return 0; 870 } 871 872 typedef struct RwCo { 873 BdrvChild *child; 874 int64_t offset; 875 QEMUIOVector *qiov; 876 bool is_write; 877 int ret; 878 BdrvRequestFlags flags; 879 } RwCo; 880 881 static void coroutine_fn bdrv_rw_co_entry(void *opaque) 882 { 883 RwCo *rwco = opaque; 884 885 if (!rwco->is_write) { 886 rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset, 887 rwco->qiov->size, rwco->qiov, 888 rwco->flags); 889 } else { 890 rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset, 891 rwco->qiov->size, rwco->qiov, 892 rwco->flags); 893 } 894 aio_wait_kick(); 895 } 896 897 /* 898 * Process a vectored synchronous request using coroutines 899 */ 900 static int bdrv_prwv_co(BdrvChild *child, int64_t offset, 901 QEMUIOVector *qiov, bool is_write, 902 BdrvRequestFlags flags) 903 { 904 Coroutine *co; 905 RwCo rwco = { 906 .child = child, 907 .offset = offset, 908 .qiov = qiov, 909 .is_write = is_write, 910 .ret = NOT_DONE, 911 .flags = flags, 912 }; 913 914 if (qemu_in_coroutine()) { 915 /* Fast-path if already in coroutine context */ 916 bdrv_rw_co_entry(&rwco); 917 } else { 918 co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco); 919 bdrv_coroutine_enter(child->bs, co); 920 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE); 921 } 922 return rwco.ret; 923 } 924 925 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, 926 int bytes, BdrvRequestFlags flags) 927 { 928 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes); 929 930 return bdrv_prwv_co(child, offset, &qiov, true, 931 BDRV_REQ_ZERO_WRITE | flags); 932 } 933 934 /* 935 * Completely zero out a block device with the help of bdrv_pwrite_zeroes. 936 * The operation is sped up by checking the block status and only writing 937 * zeroes to the device if they currently do not return zeroes. Optional 938 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP, 939 * BDRV_REQ_FUA). 940 * 941 * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 942 */ 943 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags) 944 { 945 int ret; 946 int64_t target_size, bytes, offset = 0; 947 BlockDriverState *bs = child->bs; 948 949 target_size = bdrv_getlength(bs); 950 if (target_size < 0) { 951 return target_size; 952 } 953 954 for (;;) { 955 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES); 956 if (bytes <= 0) { 957 return 0; 958 } 959 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL); 960 if (ret < 0) { 961 return ret; 962 } 963 if (ret & BDRV_BLOCK_ZERO) { 964 offset += bytes; 965 continue; 966 } 967 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags); 968 if (ret < 0) { 969 return ret; 970 } 971 offset += bytes; 972 } 973 } 974 975 int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov) 976 { 977 int ret; 978 979 ret = bdrv_prwv_co(child, offset, qiov, false, 0); 980 if (ret < 0) { 981 return ret; 982 } 983 984 return qiov->size; 985 } 986 987 /* See bdrv_pwrite() for the return codes */ 988 int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes) 989 { 990 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 991 992 if (bytes < 0) { 993 return -EINVAL; 994 } 995 996 return bdrv_preadv(child, offset, &qiov); 997 } 998 999 int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov) 1000 { 1001 int ret; 1002 1003 ret = bdrv_prwv_co(child, offset, qiov, true, 0); 1004 if (ret < 0) { 1005 return ret; 1006 } 1007 1008 return qiov->size; 1009 } 1010 1011 /* Return no. of bytes on success or < 0 on error. Important errors are: 1012 -EIO generic I/O error (may happen for all errors) 1013 -ENOMEDIUM No media inserted. 1014 -EINVAL Invalid offset or number of bytes 1015 -EACCES Trying to write a read-only device 1016 */ 1017 int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes) 1018 { 1019 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 1020 1021 if (bytes < 0) { 1022 return -EINVAL; 1023 } 1024 1025 return bdrv_pwritev(child, offset, &qiov); 1026 } 1027 1028 /* 1029 * Writes to the file and ensures that no writes are reordered across this 1030 * request (acts as a barrier) 1031 * 1032 * Returns 0 on success, -errno in error cases. 1033 */ 1034 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset, 1035 const void *buf, int count) 1036 { 1037 int ret; 1038 1039 ret = bdrv_pwrite(child, offset, buf, count); 1040 if (ret < 0) { 1041 return ret; 1042 } 1043 1044 ret = bdrv_flush(child->bs); 1045 if (ret < 0) { 1046 return ret; 1047 } 1048 1049 return 0; 1050 } 1051 1052 typedef struct CoroutineIOCompletion { 1053 Coroutine *coroutine; 1054 int ret; 1055 } CoroutineIOCompletion; 1056 1057 static void bdrv_co_io_em_complete(void *opaque, int ret) 1058 { 1059 CoroutineIOCompletion *co = opaque; 1060 1061 co->ret = ret; 1062 aio_co_wake(co->coroutine); 1063 } 1064 1065 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs, 1066 uint64_t offset, uint64_t bytes, 1067 QEMUIOVector *qiov, 1068 size_t qiov_offset, int flags) 1069 { 1070 BlockDriver *drv = bs->drv; 1071 int64_t sector_num; 1072 unsigned int nb_sectors; 1073 QEMUIOVector local_qiov; 1074 int ret; 1075 1076 assert(!(flags & ~BDRV_REQ_MASK)); 1077 assert(!(flags & BDRV_REQ_NO_FALLBACK)); 1078 1079 if (!drv) { 1080 return -ENOMEDIUM; 1081 } 1082 1083 if (drv->bdrv_co_preadv_part) { 1084 return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset, 1085 flags); 1086 } 1087 1088 if (qiov_offset > 0 || bytes != qiov->size) { 1089 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1090 qiov = &local_qiov; 1091 } 1092 1093 if (drv->bdrv_co_preadv) { 1094 ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); 1095 goto out; 1096 } 1097 1098 if (drv->bdrv_aio_preadv) { 1099 BlockAIOCB *acb; 1100 CoroutineIOCompletion co = { 1101 .coroutine = qemu_coroutine_self(), 1102 }; 1103 1104 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, 1105 bdrv_co_io_em_complete, &co); 1106 if (acb == NULL) { 1107 ret = -EIO; 1108 goto out; 1109 } else { 1110 qemu_coroutine_yield(); 1111 ret = co.ret; 1112 goto out; 1113 } 1114 } 1115 1116 sector_num = offset >> BDRV_SECTOR_BITS; 1117 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1118 1119 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1120 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1121 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1122 assert(drv->bdrv_co_readv); 1123 1124 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 1125 1126 out: 1127 if (qiov == &local_qiov) { 1128 qemu_iovec_destroy(&local_qiov); 1129 } 1130 1131 return ret; 1132 } 1133 1134 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, 1135 uint64_t offset, uint64_t bytes, 1136 QEMUIOVector *qiov, 1137 size_t qiov_offset, int flags) 1138 { 1139 BlockDriver *drv = bs->drv; 1140 int64_t sector_num; 1141 unsigned int nb_sectors; 1142 QEMUIOVector local_qiov; 1143 int ret; 1144 1145 assert(!(flags & ~BDRV_REQ_MASK)); 1146 assert(!(flags & BDRV_REQ_NO_FALLBACK)); 1147 1148 if (!drv) { 1149 return -ENOMEDIUM; 1150 } 1151 1152 if (drv->bdrv_co_pwritev_part) { 1153 ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 1154 flags & bs->supported_write_flags); 1155 flags &= ~bs->supported_write_flags; 1156 goto emulate_flags; 1157 } 1158 1159 if (qiov_offset > 0 || bytes != qiov->size) { 1160 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1161 qiov = &local_qiov; 1162 } 1163 1164 if (drv->bdrv_co_pwritev) { 1165 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, 1166 flags & bs->supported_write_flags); 1167 flags &= ~bs->supported_write_flags; 1168 goto emulate_flags; 1169 } 1170 1171 if (drv->bdrv_aio_pwritev) { 1172 BlockAIOCB *acb; 1173 CoroutineIOCompletion co = { 1174 .coroutine = qemu_coroutine_self(), 1175 }; 1176 1177 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, 1178 flags & bs->supported_write_flags, 1179 bdrv_co_io_em_complete, &co); 1180 flags &= ~bs->supported_write_flags; 1181 if (acb == NULL) { 1182 ret = -EIO; 1183 } else { 1184 qemu_coroutine_yield(); 1185 ret = co.ret; 1186 } 1187 goto emulate_flags; 1188 } 1189 1190 sector_num = offset >> BDRV_SECTOR_BITS; 1191 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1192 1193 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1194 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1195 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1196 1197 assert(drv->bdrv_co_writev); 1198 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, 1199 flags & bs->supported_write_flags); 1200 flags &= ~bs->supported_write_flags; 1201 1202 emulate_flags: 1203 if (ret == 0 && (flags & BDRV_REQ_FUA)) { 1204 ret = bdrv_co_flush(bs); 1205 } 1206 1207 if (qiov == &local_qiov) { 1208 qemu_iovec_destroy(&local_qiov); 1209 } 1210 1211 return ret; 1212 } 1213 1214 static int coroutine_fn 1215 bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset, 1216 uint64_t bytes, QEMUIOVector *qiov, 1217 size_t qiov_offset) 1218 { 1219 BlockDriver *drv = bs->drv; 1220 QEMUIOVector local_qiov; 1221 int ret; 1222 1223 if (!drv) { 1224 return -ENOMEDIUM; 1225 } 1226 1227 if (!block_driver_can_compress(drv)) { 1228 return -ENOTSUP; 1229 } 1230 1231 if (drv->bdrv_co_pwritev_compressed_part) { 1232 return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes, 1233 qiov, qiov_offset); 1234 } 1235 1236 if (qiov_offset == 0) { 1237 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); 1238 } 1239 1240 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1241 ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov); 1242 qemu_iovec_destroy(&local_qiov); 1243 1244 return ret; 1245 } 1246 1247 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child, 1248 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 1249 size_t qiov_offset, int flags) 1250 { 1251 BlockDriverState *bs = child->bs; 1252 1253 /* Perform I/O through a temporary buffer so that users who scribble over 1254 * their read buffer while the operation is in progress do not end up 1255 * modifying the image file. This is critical for zero-copy guest I/O 1256 * where anything might happen inside guest memory. 1257 */ 1258 void *bounce_buffer = NULL; 1259 1260 BlockDriver *drv = bs->drv; 1261 int64_t cluster_offset; 1262 int64_t cluster_bytes; 1263 size_t skip_bytes; 1264 int ret; 1265 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, 1266 BDRV_REQUEST_MAX_BYTES); 1267 unsigned int progress = 0; 1268 bool skip_write; 1269 1270 if (!drv) { 1271 return -ENOMEDIUM; 1272 } 1273 1274 /* 1275 * Do not write anything when the BDS is inactive. That is not 1276 * allowed, and it would not help. 1277 */ 1278 skip_write = (bs->open_flags & BDRV_O_INACTIVE); 1279 1280 /* FIXME We cannot require callers to have write permissions when all they 1281 * are doing is a read request. If we did things right, write permissions 1282 * would be obtained anyway, but internally by the copy-on-read code. As 1283 * long as it is implemented here rather than in a separate filter driver, 1284 * the copy-on-read code doesn't have its own BdrvChild, however, for which 1285 * it could request permissions. Therefore we have to bypass the permission 1286 * system for the moment. */ 1287 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1288 1289 /* Cover entire cluster so no additional backing file I/O is required when 1290 * allocating cluster in the image file. Note that this value may exceed 1291 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which 1292 * is one reason we loop rather than doing it all at once. 1293 */ 1294 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); 1295 skip_bytes = offset - cluster_offset; 1296 1297 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, 1298 cluster_offset, cluster_bytes); 1299 1300 while (cluster_bytes) { 1301 int64_t pnum; 1302 1303 if (skip_write) { 1304 ret = 1; /* "already allocated", so nothing will be copied */ 1305 pnum = MIN(cluster_bytes, max_transfer); 1306 } else { 1307 ret = bdrv_is_allocated(bs, cluster_offset, 1308 MIN(cluster_bytes, max_transfer), &pnum); 1309 if (ret < 0) { 1310 /* 1311 * Safe to treat errors in querying allocation as if 1312 * unallocated; we'll probably fail again soon on the 1313 * read, but at least that will set a decent errno. 1314 */ 1315 pnum = MIN(cluster_bytes, max_transfer); 1316 } 1317 1318 /* Stop at EOF if the image ends in the middle of the cluster */ 1319 if (ret == 0 && pnum == 0) { 1320 assert(progress >= bytes); 1321 break; 1322 } 1323 1324 assert(skip_bytes < pnum); 1325 } 1326 1327 if (ret <= 0) { 1328 QEMUIOVector local_qiov; 1329 1330 /* Must copy-on-read; use the bounce buffer */ 1331 pnum = MIN(pnum, MAX_BOUNCE_BUFFER); 1332 if (!bounce_buffer) { 1333 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum); 1334 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER); 1335 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed); 1336 1337 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len); 1338 if (!bounce_buffer) { 1339 ret = -ENOMEM; 1340 goto err; 1341 } 1342 } 1343 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum); 1344 1345 ret = bdrv_driver_preadv(bs, cluster_offset, pnum, 1346 &local_qiov, 0, 0); 1347 if (ret < 0) { 1348 goto err; 1349 } 1350 1351 bdrv_debug_event(bs, BLKDBG_COR_WRITE); 1352 if (drv->bdrv_co_pwrite_zeroes && 1353 buffer_is_zero(bounce_buffer, pnum)) { 1354 /* FIXME: Should we (perhaps conditionally) be setting 1355 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy 1356 * that still correctly reads as zero? */ 1357 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 1358 BDRV_REQ_WRITE_UNCHANGED); 1359 } else { 1360 /* This does not change the data on the disk, it is not 1361 * necessary to flush even in cache=writethrough mode. 1362 */ 1363 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum, 1364 &local_qiov, 0, 1365 BDRV_REQ_WRITE_UNCHANGED); 1366 } 1367 1368 if (ret < 0) { 1369 /* It might be okay to ignore write errors for guest 1370 * requests. If this is a deliberate copy-on-read 1371 * then we don't want to ignore the error. Simply 1372 * report it in all cases. 1373 */ 1374 goto err; 1375 } 1376 1377 if (!(flags & BDRV_REQ_PREFETCH)) { 1378 qemu_iovec_from_buf(qiov, qiov_offset + progress, 1379 bounce_buffer + skip_bytes, 1380 pnum - skip_bytes); 1381 } 1382 } else if (!(flags & BDRV_REQ_PREFETCH)) { 1383 /* Read directly into the destination */ 1384 ret = bdrv_driver_preadv(bs, offset + progress, 1385 MIN(pnum - skip_bytes, bytes - progress), 1386 qiov, qiov_offset + progress, 0); 1387 if (ret < 0) { 1388 goto err; 1389 } 1390 } 1391 1392 cluster_offset += pnum; 1393 cluster_bytes -= pnum; 1394 progress += pnum - skip_bytes; 1395 skip_bytes = 0; 1396 } 1397 ret = 0; 1398 1399 err: 1400 qemu_vfree(bounce_buffer); 1401 return ret; 1402 } 1403 1404 /* 1405 * Forwards an already correctly aligned request to the BlockDriver. This 1406 * handles copy on read, zeroing after EOF, and fragmentation of large 1407 * reads; any other features must be implemented by the caller. 1408 */ 1409 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, 1410 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1411 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) 1412 { 1413 BlockDriverState *bs = child->bs; 1414 int64_t total_bytes, max_bytes; 1415 int ret = 0; 1416 uint64_t bytes_remaining = bytes; 1417 int max_transfer; 1418 1419 assert(is_power_of_2(align)); 1420 assert((offset & (align - 1)) == 0); 1421 assert((bytes & (align - 1)) == 0); 1422 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1423 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1424 align); 1425 1426 /* TODO: We would need a per-BDS .supported_read_flags and 1427 * potential fallback support, if we ever implement any read flags 1428 * to pass through to drivers. For now, there aren't any 1429 * passthrough flags. */ 1430 assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ | 1431 BDRV_REQ_PREFETCH))); 1432 1433 /* Handle Copy on Read and associated serialisation */ 1434 if (flags & BDRV_REQ_COPY_ON_READ) { 1435 /* If we touch the same cluster it counts as an overlap. This 1436 * guarantees that allocating writes will be serialized and not race 1437 * with each other for the same cluster. For example, in copy-on-read 1438 * it ensures that the CoR read and write operations are atomic and 1439 * guest writes cannot interleave between them. */ 1440 mark_request_serialising(req, bdrv_get_cluster_size(bs)); 1441 } 1442 1443 /* BDRV_REQ_SERIALISING is only for write operation */ 1444 assert(!(flags & BDRV_REQ_SERIALISING)); 1445 1446 if (!(flags & BDRV_REQ_NO_SERIALISING)) { 1447 wait_serialising_requests(req); 1448 } 1449 1450 if (flags & BDRV_REQ_COPY_ON_READ) { 1451 int64_t pnum; 1452 1453 ret = bdrv_is_allocated(bs, offset, bytes, &pnum); 1454 if (ret < 0) { 1455 goto out; 1456 } 1457 1458 if (!ret || pnum != bytes) { 1459 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, 1460 qiov, qiov_offset, flags); 1461 goto out; 1462 } else if (flags & BDRV_REQ_PREFETCH) { 1463 goto out; 1464 } 1465 } 1466 1467 /* Forward the request to the BlockDriver, possibly fragmenting it */ 1468 total_bytes = bdrv_getlength(bs); 1469 if (total_bytes < 0) { 1470 ret = total_bytes; 1471 goto out; 1472 } 1473 1474 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); 1475 if (bytes <= max_bytes && bytes <= max_transfer) { 1476 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0); 1477 goto out; 1478 } 1479 1480 while (bytes_remaining) { 1481 int num; 1482 1483 if (max_bytes) { 1484 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer)); 1485 assert(num); 1486 1487 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, 1488 num, qiov, bytes - bytes_remaining, 0); 1489 max_bytes -= num; 1490 } else { 1491 num = bytes_remaining; 1492 ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0, 1493 bytes_remaining); 1494 } 1495 if (ret < 0) { 1496 goto out; 1497 } 1498 bytes_remaining -= num; 1499 } 1500 1501 out: 1502 return ret < 0 ? ret : 0; 1503 } 1504 1505 /* 1506 * Request padding 1507 * 1508 * |<---- align ----->| |<----- align ---->| 1509 * |<- head ->|<------------- bytes ------------->|<-- tail -->| 1510 * | | | | | | 1511 * -*----------$-------*-------- ... --------*-----$------------*--- 1512 * | | | | | | 1513 * | offset | | end | 1514 * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end) 1515 * [buf ... ) [tail_buf ) 1516 * 1517 * @buf is an aligned allocation needed to store @head and @tail paddings. @head 1518 * is placed at the beginning of @buf and @tail at the @end. 1519 * 1520 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk 1521 * around tail, if tail exists. 1522 * 1523 * @merge_reads is true for small requests, 1524 * if @buf_len == @head + bytes + @tail. In this case it is possible that both 1525 * head and tail exist but @buf_len == align and @tail_buf == @buf. 1526 */ 1527 typedef struct BdrvRequestPadding { 1528 uint8_t *buf; 1529 size_t buf_len; 1530 uint8_t *tail_buf; 1531 size_t head; 1532 size_t tail; 1533 bool merge_reads; 1534 QEMUIOVector local_qiov; 1535 } BdrvRequestPadding; 1536 1537 static bool bdrv_init_padding(BlockDriverState *bs, 1538 int64_t offset, int64_t bytes, 1539 BdrvRequestPadding *pad) 1540 { 1541 uint64_t align = bs->bl.request_alignment; 1542 size_t sum; 1543 1544 memset(pad, 0, sizeof(*pad)); 1545 1546 pad->head = offset & (align - 1); 1547 pad->tail = ((offset + bytes) & (align - 1)); 1548 if (pad->tail) { 1549 pad->tail = align - pad->tail; 1550 } 1551 1552 if ((!pad->head && !pad->tail) || !bytes) { 1553 return false; 1554 } 1555 1556 sum = pad->head + bytes + pad->tail; 1557 pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align; 1558 pad->buf = qemu_blockalign(bs, pad->buf_len); 1559 pad->merge_reads = sum == pad->buf_len; 1560 if (pad->tail) { 1561 pad->tail_buf = pad->buf + pad->buf_len - align; 1562 } 1563 1564 return true; 1565 } 1566 1567 static int bdrv_padding_rmw_read(BdrvChild *child, 1568 BdrvTrackedRequest *req, 1569 BdrvRequestPadding *pad, 1570 bool zero_middle) 1571 { 1572 QEMUIOVector local_qiov; 1573 BlockDriverState *bs = child->bs; 1574 uint64_t align = bs->bl.request_alignment; 1575 int ret; 1576 1577 assert(req->serialising && pad->buf); 1578 1579 if (pad->head || pad->merge_reads) { 1580 uint64_t bytes = pad->merge_reads ? pad->buf_len : align; 1581 1582 qemu_iovec_init_buf(&local_qiov, pad->buf, bytes); 1583 1584 if (pad->head) { 1585 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 1586 } 1587 if (pad->merge_reads && pad->tail) { 1588 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1589 } 1590 ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes, 1591 align, &local_qiov, 0, 0); 1592 if (ret < 0) { 1593 return ret; 1594 } 1595 if (pad->head) { 1596 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1597 } 1598 if (pad->merge_reads && pad->tail) { 1599 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1600 } 1601 1602 if (pad->merge_reads) { 1603 goto zero_mem; 1604 } 1605 } 1606 1607 if (pad->tail) { 1608 qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align); 1609 1610 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1611 ret = bdrv_aligned_preadv( 1612 child, req, 1613 req->overlap_offset + req->overlap_bytes - align, 1614 align, align, &local_qiov, 0, 0); 1615 if (ret < 0) { 1616 return ret; 1617 } 1618 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1619 } 1620 1621 zero_mem: 1622 if (zero_middle) { 1623 memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail); 1624 } 1625 1626 return 0; 1627 } 1628 1629 static void bdrv_padding_destroy(BdrvRequestPadding *pad) 1630 { 1631 if (pad->buf) { 1632 qemu_vfree(pad->buf); 1633 qemu_iovec_destroy(&pad->local_qiov); 1634 } 1635 } 1636 1637 /* 1638 * bdrv_pad_request 1639 * 1640 * Exchange request parameters with padded request if needed. Don't include RMW 1641 * read of padding, bdrv_padding_rmw_read() should be called separately if 1642 * needed. 1643 * 1644 * All parameters except @bs are in-out: they represent original request at 1645 * function call and padded (if padding needed) at function finish. 1646 * 1647 * Function always succeeds. 1648 */ 1649 static bool bdrv_pad_request(BlockDriverState *bs, 1650 QEMUIOVector **qiov, size_t *qiov_offset, 1651 int64_t *offset, unsigned int *bytes, 1652 BdrvRequestPadding *pad) 1653 { 1654 if (!bdrv_init_padding(bs, *offset, *bytes, pad)) { 1655 return false; 1656 } 1657 1658 qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head, 1659 *qiov, *qiov_offset, *bytes, 1660 pad->buf + pad->buf_len - pad->tail, pad->tail); 1661 *bytes += pad->head + pad->tail; 1662 *offset -= pad->head; 1663 *qiov = &pad->local_qiov; 1664 *qiov_offset = 0; 1665 1666 return true; 1667 } 1668 1669 int coroutine_fn bdrv_co_preadv(BdrvChild *child, 1670 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 1671 BdrvRequestFlags flags) 1672 { 1673 return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags); 1674 } 1675 1676 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, 1677 int64_t offset, unsigned int bytes, 1678 QEMUIOVector *qiov, size_t qiov_offset, 1679 BdrvRequestFlags flags) 1680 { 1681 BlockDriverState *bs = child->bs; 1682 BdrvTrackedRequest req; 1683 BdrvRequestPadding pad; 1684 int ret; 1685 1686 trace_bdrv_co_preadv(bs, offset, bytes, flags); 1687 1688 ret = bdrv_check_byte_request(bs, offset, bytes); 1689 if (ret < 0) { 1690 return ret; 1691 } 1692 1693 bdrv_inc_in_flight(bs); 1694 1695 /* Don't do copy-on-read if we read data before write operation */ 1696 if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) { 1697 flags |= BDRV_REQ_COPY_ON_READ; 1698 } 1699 1700 bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad); 1701 1702 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); 1703 ret = bdrv_aligned_preadv(child, &req, offset, bytes, 1704 bs->bl.request_alignment, 1705 qiov, qiov_offset, flags); 1706 tracked_request_end(&req); 1707 bdrv_dec_in_flight(bs); 1708 1709 bdrv_padding_destroy(&pad); 1710 1711 return ret; 1712 } 1713 1714 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 1715 int64_t offset, int bytes, BdrvRequestFlags flags) 1716 { 1717 BlockDriver *drv = bs->drv; 1718 QEMUIOVector qiov; 1719 void *buf = NULL; 1720 int ret = 0; 1721 bool need_flush = false; 1722 int head = 0; 1723 int tail = 0; 1724 1725 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); 1726 int alignment = MAX(bs->bl.pwrite_zeroes_alignment, 1727 bs->bl.request_alignment); 1728 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); 1729 1730 if (!drv) { 1731 return -ENOMEDIUM; 1732 } 1733 1734 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { 1735 return -ENOTSUP; 1736 } 1737 1738 assert(alignment % bs->bl.request_alignment == 0); 1739 head = offset % alignment; 1740 tail = (offset + bytes) % alignment; 1741 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); 1742 assert(max_write_zeroes >= bs->bl.request_alignment); 1743 1744 while (bytes > 0 && !ret) { 1745 int num = bytes; 1746 1747 /* Align request. Block drivers can expect the "bulk" of the request 1748 * to be aligned, and that unaligned requests do not cross cluster 1749 * boundaries. 1750 */ 1751 if (head) { 1752 /* Make a small request up to the first aligned sector. For 1753 * convenience, limit this request to max_transfer even if 1754 * we don't need to fall back to writes. */ 1755 num = MIN(MIN(bytes, max_transfer), alignment - head); 1756 head = (head + num) % alignment; 1757 assert(num < max_write_zeroes); 1758 } else if (tail && num > alignment) { 1759 /* Shorten the request to the last aligned sector. */ 1760 num -= tail; 1761 } 1762 1763 /* limit request size */ 1764 if (num > max_write_zeroes) { 1765 num = max_write_zeroes; 1766 } 1767 1768 ret = -ENOTSUP; 1769 /* First try the efficient write zeroes operation */ 1770 if (drv->bdrv_co_pwrite_zeroes) { 1771 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1772 flags & bs->supported_zero_flags); 1773 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1774 !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1775 need_flush = true; 1776 } 1777 } else { 1778 assert(!bs->supported_zero_flags); 1779 } 1780 1781 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) { 1782 /* Fall back to bounce buffer if write zeroes is unsupported */ 1783 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1784 1785 if ((flags & BDRV_REQ_FUA) && 1786 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1787 /* No need for bdrv_driver_pwrite() to do a fallback 1788 * flush on each chunk; use just one at the end */ 1789 write_flags &= ~BDRV_REQ_FUA; 1790 need_flush = true; 1791 } 1792 num = MIN(num, max_transfer); 1793 if (buf == NULL) { 1794 buf = qemu_try_blockalign0(bs, num); 1795 if (buf == NULL) { 1796 ret = -ENOMEM; 1797 goto fail; 1798 } 1799 } 1800 qemu_iovec_init_buf(&qiov, buf, num); 1801 1802 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags); 1803 1804 /* Keep bounce buffer around if it is big enough for all 1805 * all future requests. 1806 */ 1807 if (num < max_transfer) { 1808 qemu_vfree(buf); 1809 buf = NULL; 1810 } 1811 } 1812 1813 offset += num; 1814 bytes -= num; 1815 } 1816 1817 fail: 1818 if (ret == 0 && need_flush) { 1819 ret = bdrv_co_flush(bs); 1820 } 1821 qemu_vfree(buf); 1822 return ret; 1823 } 1824 1825 static inline int coroutine_fn 1826 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes, 1827 BdrvTrackedRequest *req, int flags) 1828 { 1829 BlockDriverState *bs = child->bs; 1830 bool waited; 1831 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 1832 1833 if (bs->read_only) { 1834 return -EPERM; 1835 } 1836 1837 /* BDRV_REQ_NO_SERIALISING is only for read operation */ 1838 assert(!(flags & BDRV_REQ_NO_SERIALISING)); 1839 assert(!(bs->open_flags & BDRV_O_INACTIVE)); 1840 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1841 assert(!(flags & ~BDRV_REQ_MASK)); 1842 1843 if (flags & BDRV_REQ_SERIALISING) { 1844 mark_request_serialising(req, bdrv_get_cluster_size(bs)); 1845 } 1846 1847 waited = wait_serialising_requests(req); 1848 1849 assert(!waited || !req->serialising || 1850 is_request_serialising_and_aligned(req)); 1851 assert(req->overlap_offset <= offset); 1852 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 1853 assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE); 1854 1855 switch (req->type) { 1856 case BDRV_TRACKED_WRITE: 1857 case BDRV_TRACKED_DISCARD: 1858 if (flags & BDRV_REQ_WRITE_UNCHANGED) { 1859 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1860 } else { 1861 assert(child->perm & BLK_PERM_WRITE); 1862 } 1863 return notifier_with_return_list_notify(&bs->before_write_notifiers, 1864 req); 1865 case BDRV_TRACKED_TRUNCATE: 1866 assert(child->perm & BLK_PERM_RESIZE); 1867 return 0; 1868 default: 1869 abort(); 1870 } 1871 } 1872 1873 static inline void coroutine_fn 1874 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes, 1875 BdrvTrackedRequest *req, int ret) 1876 { 1877 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 1878 BlockDriverState *bs = child->bs; 1879 1880 atomic_inc(&bs->write_gen); 1881 1882 /* 1883 * Discard cannot extend the image, but in error handling cases, such as 1884 * when reverting a qcow2 cluster allocation, the discarded range can pass 1885 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD 1886 * here. Instead, just skip it, since semantically a discard request 1887 * beyond EOF cannot expand the image anyway. 1888 */ 1889 if (ret == 0 && 1890 (req->type == BDRV_TRACKED_TRUNCATE || 1891 end_sector > bs->total_sectors) && 1892 req->type != BDRV_TRACKED_DISCARD) { 1893 bs->total_sectors = end_sector; 1894 bdrv_parent_cb_resize(bs); 1895 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); 1896 } 1897 if (req->bytes) { 1898 switch (req->type) { 1899 case BDRV_TRACKED_WRITE: 1900 stat64_max(&bs->wr_highest_offset, offset + bytes); 1901 /* fall through, to set dirty bits */ 1902 case BDRV_TRACKED_DISCARD: 1903 bdrv_set_dirty(bs, offset, bytes); 1904 break; 1905 default: 1906 break; 1907 } 1908 } 1909 } 1910 1911 /* 1912 * Forwards an already correctly aligned write request to the BlockDriver, 1913 * after possibly fragmenting it. 1914 */ 1915 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, 1916 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1917 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) 1918 { 1919 BlockDriverState *bs = child->bs; 1920 BlockDriver *drv = bs->drv; 1921 int ret; 1922 1923 uint64_t bytes_remaining = bytes; 1924 int max_transfer; 1925 1926 if (!drv) { 1927 return -ENOMEDIUM; 1928 } 1929 1930 if (bdrv_has_readonly_bitmaps(bs)) { 1931 return -EPERM; 1932 } 1933 1934 assert(is_power_of_2(align)); 1935 assert((offset & (align - 1)) == 0); 1936 assert((bytes & (align - 1)) == 0); 1937 assert(!qiov || qiov_offset + bytes <= qiov->size); 1938 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1939 align); 1940 1941 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 1942 1943 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1944 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 1945 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { 1946 flags |= BDRV_REQ_ZERO_WRITE; 1947 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 1948 flags |= BDRV_REQ_MAY_UNMAP; 1949 } 1950 } 1951 1952 if (ret < 0) { 1953 /* Do nothing, write notifier decided to fail this request */ 1954 } else if (flags & BDRV_REQ_ZERO_WRITE) { 1955 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 1956 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 1957 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 1958 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, 1959 qiov, qiov_offset); 1960 } else if (bytes <= max_transfer) { 1961 bdrv_debug_event(bs, BLKDBG_PWRITEV); 1962 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); 1963 } else { 1964 bdrv_debug_event(bs, BLKDBG_PWRITEV); 1965 while (bytes_remaining) { 1966 int num = MIN(bytes_remaining, max_transfer); 1967 int local_flags = flags; 1968 1969 assert(num); 1970 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 1971 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1972 /* If FUA is going to be emulated by flush, we only 1973 * need to flush on the last iteration */ 1974 local_flags &= ~BDRV_REQ_FUA; 1975 } 1976 1977 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 1978 num, qiov, bytes - bytes_remaining, 1979 local_flags); 1980 if (ret < 0) { 1981 break; 1982 } 1983 bytes_remaining -= num; 1984 } 1985 } 1986 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 1987 1988 if (ret >= 0) { 1989 ret = 0; 1990 } 1991 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 1992 1993 return ret; 1994 } 1995 1996 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, 1997 int64_t offset, 1998 unsigned int bytes, 1999 BdrvRequestFlags flags, 2000 BdrvTrackedRequest *req) 2001 { 2002 BlockDriverState *bs = child->bs; 2003 QEMUIOVector local_qiov; 2004 uint64_t align = bs->bl.request_alignment; 2005 int ret = 0; 2006 bool padding; 2007 BdrvRequestPadding pad; 2008 2009 padding = bdrv_init_padding(bs, offset, bytes, &pad); 2010 if (padding) { 2011 mark_request_serialising(req, align); 2012 wait_serialising_requests(req); 2013 2014 bdrv_padding_rmw_read(child, req, &pad, true); 2015 2016 if (pad.head || pad.merge_reads) { 2017 int64_t aligned_offset = offset & ~(align - 1); 2018 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; 2019 2020 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 2021 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 2022 align, &local_qiov, 0, 2023 flags & ~BDRV_REQ_ZERO_WRITE); 2024 if (ret < 0 || pad.merge_reads) { 2025 /* Error or all work is done */ 2026 goto out; 2027 } 2028 offset += write_bytes - pad.head; 2029 bytes -= write_bytes - pad.head; 2030 } 2031 } 2032 2033 assert(!bytes || (offset & (align - 1)) == 0); 2034 if (bytes >= align) { 2035 /* Write the aligned part in the middle. */ 2036 uint64_t aligned_bytes = bytes & ~(align - 1); 2037 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 2038 NULL, 0, flags); 2039 if (ret < 0) { 2040 goto out; 2041 } 2042 bytes -= aligned_bytes; 2043 offset += aligned_bytes; 2044 } 2045 2046 assert(!bytes || (offset & (align - 1)) == 0); 2047 if (bytes) { 2048 assert(align == pad.tail + bytes); 2049 2050 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); 2051 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 2052 &local_qiov, 0, 2053 flags & ~BDRV_REQ_ZERO_WRITE); 2054 } 2055 2056 out: 2057 bdrv_padding_destroy(&pad); 2058 2059 return ret; 2060 } 2061 2062 /* 2063 * Handle a write request in coroutine context 2064 */ 2065 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 2066 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 2067 BdrvRequestFlags flags) 2068 { 2069 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); 2070 } 2071 2072 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, 2073 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset, 2074 BdrvRequestFlags flags) 2075 { 2076 BlockDriverState *bs = child->bs; 2077 BdrvTrackedRequest req; 2078 uint64_t align = bs->bl.request_alignment; 2079 BdrvRequestPadding pad; 2080 int ret; 2081 2082 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags); 2083 2084 if (!bs->drv) { 2085 return -ENOMEDIUM; 2086 } 2087 2088 ret = bdrv_check_byte_request(bs, offset, bytes); 2089 if (ret < 0) { 2090 return ret; 2091 } 2092 2093 bdrv_inc_in_flight(bs); 2094 /* 2095 * Align write if necessary by performing a read-modify-write cycle. 2096 * Pad qiov with the read parts and be sure to have a tracked request not 2097 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 2098 */ 2099 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 2100 2101 if (flags & BDRV_REQ_ZERO_WRITE) { 2102 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); 2103 goto out; 2104 } 2105 2106 if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) { 2107 mark_request_serialising(&req, align); 2108 wait_serialising_requests(&req); 2109 bdrv_padding_rmw_read(child, &req, &pad, false); 2110 } 2111 2112 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, 2113 qiov, qiov_offset, flags); 2114 2115 bdrv_padding_destroy(&pad); 2116 2117 out: 2118 tracked_request_end(&req); 2119 bdrv_dec_in_flight(bs); 2120 2121 return ret; 2122 } 2123 2124 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, 2125 int bytes, BdrvRequestFlags flags) 2126 { 2127 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); 2128 2129 if (!(child->bs->open_flags & BDRV_O_UNMAP)) { 2130 flags &= ~BDRV_REQ_MAY_UNMAP; 2131 } 2132 2133 return bdrv_co_pwritev(child, offset, bytes, NULL, 2134 BDRV_REQ_ZERO_WRITE | flags); 2135 } 2136 2137 /* 2138 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not. 2139 */ 2140 int bdrv_flush_all(void) 2141 { 2142 BdrvNextIterator it; 2143 BlockDriverState *bs = NULL; 2144 int result = 0; 2145 2146 /* 2147 * bdrv queue is managed by record/replay, 2148 * creating new flush request for stopping 2149 * the VM may break the determinism 2150 */ 2151 if (replay_events_enabled()) { 2152 return result; 2153 } 2154 2155 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 2156 AioContext *aio_context = bdrv_get_aio_context(bs); 2157 int ret; 2158 2159 aio_context_acquire(aio_context); 2160 ret = bdrv_flush(bs); 2161 if (ret < 0 && !result) { 2162 result = ret; 2163 } 2164 aio_context_release(aio_context); 2165 } 2166 2167 return result; 2168 } 2169 2170 2171 typedef struct BdrvCoBlockStatusData { 2172 BlockDriverState *bs; 2173 BlockDriverState *base; 2174 bool want_zero; 2175 int64_t offset; 2176 int64_t bytes; 2177 int64_t *pnum; 2178 int64_t *map; 2179 BlockDriverState **file; 2180 int ret; 2181 bool done; 2182 } BdrvCoBlockStatusData; 2183 2184 int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs, 2185 bool want_zero, 2186 int64_t offset, 2187 int64_t bytes, 2188 int64_t *pnum, 2189 int64_t *map, 2190 BlockDriverState **file) 2191 { 2192 assert(bs->file && bs->file->bs); 2193 *pnum = bytes; 2194 *map = offset; 2195 *file = bs->file->bs; 2196 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2197 } 2198 2199 int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs, 2200 bool want_zero, 2201 int64_t offset, 2202 int64_t bytes, 2203 int64_t *pnum, 2204 int64_t *map, 2205 BlockDriverState **file) 2206 { 2207 assert(bs->backing && bs->backing->bs); 2208 *pnum = bytes; 2209 *map = offset; 2210 *file = bs->backing->bs; 2211 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2212 } 2213 2214 /* 2215 * Returns the allocation status of the specified sectors. 2216 * Drivers not implementing the functionality are assumed to not support 2217 * backing files, hence all their sectors are reported as allocated. 2218 * 2219 * If 'want_zero' is true, the caller is querying for mapping 2220 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and 2221 * _ZERO where possible; otherwise, the result favors larger 'pnum', 2222 * with a focus on accurate BDRV_BLOCK_ALLOCATED. 2223 * 2224 * If 'offset' is beyond the end of the disk image the return value is 2225 * BDRV_BLOCK_EOF and 'pnum' is set to 0. 2226 * 2227 * 'bytes' is the max value 'pnum' should be set to. If bytes goes 2228 * beyond the end of the disk image it will be clamped; if 'pnum' is set to 2229 * the end of the image, then the returned value will include BDRV_BLOCK_EOF. 2230 * 2231 * 'pnum' is set to the number of bytes (including and immediately 2232 * following the specified offset) that are easily known to be in the 2233 * same allocated/unallocated state. Note that a second call starting 2234 * at the original offset plus returned pnum may have the same status. 2235 * The returned value is non-zero on success except at end-of-file. 2236 * 2237 * Returns negative errno on failure. Otherwise, if the 2238 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are 2239 * set to the host mapping and BDS corresponding to the guest offset. 2240 */ 2241 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, 2242 bool want_zero, 2243 int64_t offset, int64_t bytes, 2244 int64_t *pnum, int64_t *map, 2245 BlockDriverState **file) 2246 { 2247 int64_t total_size; 2248 int64_t n; /* bytes */ 2249 int ret; 2250 int64_t local_map = 0; 2251 BlockDriverState *local_file = NULL; 2252 int64_t aligned_offset, aligned_bytes; 2253 uint32_t align; 2254 2255 assert(pnum); 2256 *pnum = 0; 2257 total_size = bdrv_getlength(bs); 2258 if (total_size < 0) { 2259 ret = total_size; 2260 goto early_out; 2261 } 2262 2263 if (offset >= total_size) { 2264 ret = BDRV_BLOCK_EOF; 2265 goto early_out; 2266 } 2267 if (!bytes) { 2268 ret = 0; 2269 goto early_out; 2270 } 2271 2272 n = total_size - offset; 2273 if (n < bytes) { 2274 bytes = n; 2275 } 2276 2277 /* Must be non-NULL or bdrv_getlength() would have failed */ 2278 assert(bs->drv); 2279 if (!bs->drv->bdrv_co_block_status) { 2280 *pnum = bytes; 2281 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2282 if (offset + bytes == total_size) { 2283 ret |= BDRV_BLOCK_EOF; 2284 } 2285 if (bs->drv->protocol_name) { 2286 ret |= BDRV_BLOCK_OFFSET_VALID; 2287 local_map = offset; 2288 local_file = bs; 2289 } 2290 goto early_out; 2291 } 2292 2293 bdrv_inc_in_flight(bs); 2294 2295 /* Round out to request_alignment boundaries */ 2296 align = bs->bl.request_alignment; 2297 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2298 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2299 2300 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2301 aligned_bytes, pnum, &local_map, 2302 &local_file); 2303 if (ret < 0) { 2304 *pnum = 0; 2305 goto out; 2306 } 2307 2308 /* 2309 * The driver's result must be a non-zero multiple of request_alignment. 2310 * Clamp pnum and adjust map to original request. 2311 */ 2312 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2313 align > offset - aligned_offset); 2314 if (ret & BDRV_BLOCK_RECURSE) { 2315 assert(ret & BDRV_BLOCK_DATA); 2316 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2317 assert(!(ret & BDRV_BLOCK_ZERO)); 2318 } 2319 2320 *pnum -= offset - aligned_offset; 2321 if (*pnum > bytes) { 2322 *pnum = bytes; 2323 } 2324 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2325 local_map += offset - aligned_offset; 2326 } 2327 2328 if (ret & BDRV_BLOCK_RAW) { 2329 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2330 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2331 *pnum, pnum, &local_map, &local_file); 2332 goto out; 2333 } 2334 2335 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2336 ret |= BDRV_BLOCK_ALLOCATED; 2337 } else if (want_zero) { 2338 if (bdrv_unallocated_blocks_are_zero(bs)) { 2339 ret |= BDRV_BLOCK_ZERO; 2340 } else if (bs->backing) { 2341 BlockDriverState *bs2 = bs->backing->bs; 2342 int64_t size2 = bdrv_getlength(bs2); 2343 2344 if (size2 >= 0 && offset >= size2) { 2345 ret |= BDRV_BLOCK_ZERO; 2346 } 2347 } 2348 } 2349 2350 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2351 local_file && local_file != bs && 2352 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2353 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2354 int64_t file_pnum; 2355 int ret2; 2356 2357 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2358 *pnum, &file_pnum, NULL, NULL); 2359 if (ret2 >= 0) { 2360 /* Ignore errors. This is just providing extra information, it 2361 * is useful but not necessary. 2362 */ 2363 if (ret2 & BDRV_BLOCK_EOF && 2364 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2365 /* 2366 * It is valid for the format block driver to read 2367 * beyond the end of the underlying file's current 2368 * size; such areas read as zero. 2369 */ 2370 ret |= BDRV_BLOCK_ZERO; 2371 } else { 2372 /* Limit request to the range reported by the protocol driver */ 2373 *pnum = file_pnum; 2374 ret |= (ret2 & BDRV_BLOCK_ZERO); 2375 } 2376 } 2377 } 2378 2379 out: 2380 bdrv_dec_in_flight(bs); 2381 if (ret >= 0 && offset + *pnum == total_size) { 2382 ret |= BDRV_BLOCK_EOF; 2383 } 2384 early_out: 2385 if (file) { 2386 *file = local_file; 2387 } 2388 if (map) { 2389 *map = local_map; 2390 } 2391 return ret; 2392 } 2393 2394 static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2395 BlockDriverState *base, 2396 bool want_zero, 2397 int64_t offset, 2398 int64_t bytes, 2399 int64_t *pnum, 2400 int64_t *map, 2401 BlockDriverState **file) 2402 { 2403 BlockDriverState *p; 2404 int ret = 0; 2405 bool first = true; 2406 2407 assert(bs != base); 2408 for (p = bs; p != base; p = backing_bs(p)) { 2409 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, 2410 file); 2411 if (ret < 0) { 2412 break; 2413 } 2414 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) { 2415 /* 2416 * Reading beyond the end of the file continues to read 2417 * zeroes, but we can only widen the result to the 2418 * unallocated length we learned from an earlier 2419 * iteration. 2420 */ 2421 *pnum = bytes; 2422 } 2423 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) { 2424 break; 2425 } 2426 /* [offset, pnum] unallocated on this layer, which could be only 2427 * the first part of [offset, bytes]. */ 2428 bytes = MIN(bytes, *pnum); 2429 first = false; 2430 } 2431 return ret; 2432 } 2433 2434 /* Coroutine wrapper for bdrv_block_status_above() */ 2435 static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque) 2436 { 2437 BdrvCoBlockStatusData *data = opaque; 2438 2439 data->ret = bdrv_co_block_status_above(data->bs, data->base, 2440 data->want_zero, 2441 data->offset, data->bytes, 2442 data->pnum, data->map, data->file); 2443 data->done = true; 2444 aio_wait_kick(); 2445 } 2446 2447 /* 2448 * Synchronous wrapper around bdrv_co_block_status_above(). 2449 * 2450 * See bdrv_co_block_status_above() for details. 2451 */ 2452 static int bdrv_common_block_status_above(BlockDriverState *bs, 2453 BlockDriverState *base, 2454 bool want_zero, int64_t offset, 2455 int64_t bytes, int64_t *pnum, 2456 int64_t *map, 2457 BlockDriverState **file) 2458 { 2459 Coroutine *co; 2460 BdrvCoBlockStatusData data = { 2461 .bs = bs, 2462 .base = base, 2463 .want_zero = want_zero, 2464 .offset = offset, 2465 .bytes = bytes, 2466 .pnum = pnum, 2467 .map = map, 2468 .file = file, 2469 .done = false, 2470 }; 2471 2472 if (qemu_in_coroutine()) { 2473 /* Fast-path if already in coroutine context */ 2474 bdrv_block_status_above_co_entry(&data); 2475 } else { 2476 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data); 2477 bdrv_coroutine_enter(bs, co); 2478 BDRV_POLL_WHILE(bs, !data.done); 2479 } 2480 return data.ret; 2481 } 2482 2483 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2484 int64_t offset, int64_t bytes, int64_t *pnum, 2485 int64_t *map, BlockDriverState **file) 2486 { 2487 return bdrv_common_block_status_above(bs, base, true, offset, bytes, 2488 pnum, map, file); 2489 } 2490 2491 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2492 int64_t *pnum, int64_t *map, BlockDriverState **file) 2493 { 2494 return bdrv_block_status_above(bs, backing_bs(bs), 2495 offset, bytes, pnum, map, file); 2496 } 2497 2498 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, 2499 int64_t bytes, int64_t *pnum) 2500 { 2501 int ret; 2502 int64_t dummy; 2503 2504 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset, 2505 bytes, pnum ? pnum : &dummy, NULL, 2506 NULL); 2507 if (ret < 0) { 2508 return ret; 2509 } 2510 return !!(ret & BDRV_BLOCK_ALLOCATED); 2511 } 2512 2513 /* 2514 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2515 * 2516 * Return 1 if (a prefix of) the given range is allocated in any image 2517 * between BASE and TOP (BASE is only included if include_base is set). 2518 * BASE can be NULL to check if the given offset is allocated in any 2519 * image of the chain. Return 0 otherwise, or negative errno on 2520 * failure. 2521 * 2522 * 'pnum' is set to the number of bytes (including and immediately 2523 * following the specified offset) that are known to be in the same 2524 * allocated/unallocated state. Note that a subsequent call starting 2525 * at 'offset + *pnum' may return the same allocation status (in other 2526 * words, the result is not necessarily the maximum possible range); 2527 * but 'pnum' will only be 0 when end of file is reached. 2528 * 2529 */ 2530 int bdrv_is_allocated_above(BlockDriverState *top, 2531 BlockDriverState *base, 2532 bool include_base, int64_t offset, 2533 int64_t bytes, int64_t *pnum) 2534 { 2535 BlockDriverState *intermediate; 2536 int ret; 2537 int64_t n = bytes; 2538 2539 assert(base || !include_base); 2540 2541 intermediate = top; 2542 while (include_base || intermediate != base) { 2543 int64_t pnum_inter; 2544 int64_t size_inter; 2545 2546 assert(intermediate); 2547 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter); 2548 if (ret < 0) { 2549 return ret; 2550 } 2551 if (ret) { 2552 *pnum = pnum_inter; 2553 return 1; 2554 } 2555 2556 size_inter = bdrv_getlength(intermediate); 2557 if (size_inter < 0) { 2558 return size_inter; 2559 } 2560 if (n > pnum_inter && 2561 (intermediate == top || offset + pnum_inter < size_inter)) { 2562 n = pnum_inter; 2563 } 2564 2565 if (intermediate == base) { 2566 break; 2567 } 2568 2569 intermediate = backing_bs(intermediate); 2570 } 2571 2572 *pnum = n; 2573 return 0; 2574 } 2575 2576 typedef struct BdrvVmstateCo { 2577 BlockDriverState *bs; 2578 QEMUIOVector *qiov; 2579 int64_t pos; 2580 bool is_read; 2581 int ret; 2582 } BdrvVmstateCo; 2583 2584 static int coroutine_fn 2585 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 2586 bool is_read) 2587 { 2588 BlockDriver *drv = bs->drv; 2589 int ret = -ENOTSUP; 2590 2591 bdrv_inc_in_flight(bs); 2592 2593 if (!drv) { 2594 ret = -ENOMEDIUM; 2595 } else if (drv->bdrv_load_vmstate) { 2596 if (is_read) { 2597 ret = drv->bdrv_load_vmstate(bs, qiov, pos); 2598 } else { 2599 ret = drv->bdrv_save_vmstate(bs, qiov, pos); 2600 } 2601 } else if (bs->file) { 2602 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read); 2603 } 2604 2605 bdrv_dec_in_flight(bs); 2606 return ret; 2607 } 2608 2609 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque) 2610 { 2611 BdrvVmstateCo *co = opaque; 2612 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read); 2613 aio_wait_kick(); 2614 } 2615 2616 static inline int 2617 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 2618 bool is_read) 2619 { 2620 if (qemu_in_coroutine()) { 2621 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read); 2622 } else { 2623 BdrvVmstateCo data = { 2624 .bs = bs, 2625 .qiov = qiov, 2626 .pos = pos, 2627 .is_read = is_read, 2628 .ret = -EINPROGRESS, 2629 }; 2630 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data); 2631 2632 bdrv_coroutine_enter(bs, co); 2633 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS); 2634 return data.ret; 2635 } 2636 } 2637 2638 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2639 int64_t pos, int size) 2640 { 2641 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2642 int ret; 2643 2644 ret = bdrv_writev_vmstate(bs, &qiov, pos); 2645 if (ret < 0) { 2646 return ret; 2647 } 2648 2649 return size; 2650 } 2651 2652 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2653 { 2654 return bdrv_rw_vmstate(bs, qiov, pos, false); 2655 } 2656 2657 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2658 int64_t pos, int size) 2659 { 2660 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2661 int ret; 2662 2663 ret = bdrv_readv_vmstate(bs, &qiov, pos); 2664 if (ret < 0) { 2665 return ret; 2666 } 2667 2668 return size; 2669 } 2670 2671 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2672 { 2673 return bdrv_rw_vmstate(bs, qiov, pos, true); 2674 } 2675 2676 /**************************************************************/ 2677 /* async I/Os */ 2678 2679 void bdrv_aio_cancel(BlockAIOCB *acb) 2680 { 2681 qemu_aio_ref(acb); 2682 bdrv_aio_cancel_async(acb); 2683 while (acb->refcnt > 1) { 2684 if (acb->aiocb_info->get_aio_context) { 2685 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2686 } else if (acb->bs) { 2687 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2688 * assert that we're not using an I/O thread. Thread-safe 2689 * code should use bdrv_aio_cancel_async exclusively. 2690 */ 2691 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2692 aio_poll(bdrv_get_aio_context(acb->bs), true); 2693 } else { 2694 abort(); 2695 } 2696 } 2697 qemu_aio_unref(acb); 2698 } 2699 2700 /* Async version of aio cancel. The caller is not blocked if the acb implements 2701 * cancel_async, otherwise we do nothing and let the request normally complete. 2702 * In either case the completion callback must be called. */ 2703 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2704 { 2705 if (acb->aiocb_info->cancel_async) { 2706 acb->aiocb_info->cancel_async(acb); 2707 } 2708 } 2709 2710 /**************************************************************/ 2711 /* Coroutine block device emulation */ 2712 2713 typedef struct FlushCo { 2714 BlockDriverState *bs; 2715 int ret; 2716 } FlushCo; 2717 2718 2719 static void coroutine_fn bdrv_flush_co_entry(void *opaque) 2720 { 2721 FlushCo *rwco = opaque; 2722 2723 rwco->ret = bdrv_co_flush(rwco->bs); 2724 aio_wait_kick(); 2725 } 2726 2727 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2728 { 2729 int current_gen; 2730 int ret = 0; 2731 2732 bdrv_inc_in_flight(bs); 2733 2734 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 2735 bdrv_is_sg(bs)) { 2736 goto early_exit; 2737 } 2738 2739 qemu_co_mutex_lock(&bs->reqs_lock); 2740 current_gen = atomic_read(&bs->write_gen); 2741 2742 /* Wait until any previous flushes are completed */ 2743 while (bs->active_flush_req) { 2744 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2745 } 2746 2747 /* Flushes reach this point in nondecreasing current_gen order. */ 2748 bs->active_flush_req = true; 2749 qemu_co_mutex_unlock(&bs->reqs_lock); 2750 2751 /* Write back all layers by calling one driver function */ 2752 if (bs->drv->bdrv_co_flush) { 2753 ret = bs->drv->bdrv_co_flush(bs); 2754 goto out; 2755 } 2756 2757 /* Write back cached data to the OS even with cache=unsafe */ 2758 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 2759 if (bs->drv->bdrv_co_flush_to_os) { 2760 ret = bs->drv->bdrv_co_flush_to_os(bs); 2761 if (ret < 0) { 2762 goto out; 2763 } 2764 } 2765 2766 /* But don't actually force it to the disk with cache=unsafe */ 2767 if (bs->open_flags & BDRV_O_NO_FLUSH) { 2768 goto flush_parent; 2769 } 2770 2771 /* Check if we really need to flush anything */ 2772 if (bs->flushed_gen == current_gen) { 2773 goto flush_parent; 2774 } 2775 2776 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 2777 if (!bs->drv) { 2778 /* bs->drv->bdrv_co_flush() might have ejected the BDS 2779 * (even in case of apparent success) */ 2780 ret = -ENOMEDIUM; 2781 goto out; 2782 } 2783 if (bs->drv->bdrv_co_flush_to_disk) { 2784 ret = bs->drv->bdrv_co_flush_to_disk(bs); 2785 } else if (bs->drv->bdrv_aio_flush) { 2786 BlockAIOCB *acb; 2787 CoroutineIOCompletion co = { 2788 .coroutine = qemu_coroutine_self(), 2789 }; 2790 2791 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 2792 if (acb == NULL) { 2793 ret = -EIO; 2794 } else { 2795 qemu_coroutine_yield(); 2796 ret = co.ret; 2797 } 2798 } else { 2799 /* 2800 * Some block drivers always operate in either writethrough or unsafe 2801 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 2802 * know how the server works (because the behaviour is hardcoded or 2803 * depends on server-side configuration), so we can't ensure that 2804 * everything is safe on disk. Returning an error doesn't work because 2805 * that would break guests even if the server operates in writethrough 2806 * mode. 2807 * 2808 * Let's hope the user knows what he's doing. 2809 */ 2810 ret = 0; 2811 } 2812 2813 if (ret < 0) { 2814 goto out; 2815 } 2816 2817 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 2818 * in the case of cache=unsafe, so there are no useless flushes. 2819 */ 2820 flush_parent: 2821 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0; 2822 out: 2823 /* Notify any pending flushes that we have completed */ 2824 if (ret == 0) { 2825 bs->flushed_gen = current_gen; 2826 } 2827 2828 qemu_co_mutex_lock(&bs->reqs_lock); 2829 bs->active_flush_req = false; 2830 /* Return value is ignored - it's ok if wait queue is empty */ 2831 qemu_co_queue_next(&bs->flush_queue); 2832 qemu_co_mutex_unlock(&bs->reqs_lock); 2833 2834 early_exit: 2835 bdrv_dec_in_flight(bs); 2836 return ret; 2837 } 2838 2839 int bdrv_flush(BlockDriverState *bs) 2840 { 2841 Coroutine *co; 2842 FlushCo flush_co = { 2843 .bs = bs, 2844 .ret = NOT_DONE, 2845 }; 2846 2847 if (qemu_in_coroutine()) { 2848 /* Fast-path if already in coroutine context */ 2849 bdrv_flush_co_entry(&flush_co); 2850 } else { 2851 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co); 2852 bdrv_coroutine_enter(bs, co); 2853 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE); 2854 } 2855 2856 return flush_co.ret; 2857 } 2858 2859 typedef struct DiscardCo { 2860 BdrvChild *child; 2861 int64_t offset; 2862 int64_t bytes; 2863 int ret; 2864 } DiscardCo; 2865 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque) 2866 { 2867 DiscardCo *rwco = opaque; 2868 2869 rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes); 2870 aio_wait_kick(); 2871 } 2872 2873 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 2874 int64_t bytes) 2875 { 2876 BdrvTrackedRequest req; 2877 int max_pdiscard, ret; 2878 int head, tail, align; 2879 BlockDriverState *bs = child->bs; 2880 2881 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) { 2882 return -ENOMEDIUM; 2883 } 2884 2885 if (bdrv_has_readonly_bitmaps(bs)) { 2886 return -EPERM; 2887 } 2888 2889 if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) { 2890 return -EIO; 2891 } 2892 2893 /* Do nothing if disabled. */ 2894 if (!(bs->open_flags & BDRV_O_UNMAP)) { 2895 return 0; 2896 } 2897 2898 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 2899 return 0; 2900 } 2901 2902 /* Discard is advisory, but some devices track and coalesce 2903 * unaligned requests, so we must pass everything down rather than 2904 * round here. Still, most devices will just silently ignore 2905 * unaligned requests (by returning -ENOTSUP), so we must fragment 2906 * the request accordingly. */ 2907 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 2908 assert(align % bs->bl.request_alignment == 0); 2909 head = offset % align; 2910 tail = (offset + bytes) % align; 2911 2912 bdrv_inc_in_flight(bs); 2913 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 2914 2915 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 2916 if (ret < 0) { 2917 goto out; 2918 } 2919 2920 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), 2921 align); 2922 assert(max_pdiscard >= bs->bl.request_alignment); 2923 2924 while (bytes > 0) { 2925 int64_t num = bytes; 2926 2927 if (head) { 2928 /* Make small requests to get to alignment boundaries. */ 2929 num = MIN(bytes, align - head); 2930 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 2931 num %= bs->bl.request_alignment; 2932 } 2933 head = (head + num) % align; 2934 assert(num < max_pdiscard); 2935 } else if (tail) { 2936 if (num > align) { 2937 /* Shorten the request to the last aligned cluster. */ 2938 num -= tail; 2939 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 2940 tail > bs->bl.request_alignment) { 2941 tail %= bs->bl.request_alignment; 2942 num -= tail; 2943 } 2944 } 2945 /* limit request size */ 2946 if (num > max_pdiscard) { 2947 num = max_pdiscard; 2948 } 2949 2950 if (!bs->drv) { 2951 ret = -ENOMEDIUM; 2952 goto out; 2953 } 2954 if (bs->drv->bdrv_co_pdiscard) { 2955 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 2956 } else { 2957 BlockAIOCB *acb; 2958 CoroutineIOCompletion co = { 2959 .coroutine = qemu_coroutine_self(), 2960 }; 2961 2962 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 2963 bdrv_co_io_em_complete, &co); 2964 if (acb == NULL) { 2965 ret = -EIO; 2966 goto out; 2967 } else { 2968 qemu_coroutine_yield(); 2969 ret = co.ret; 2970 } 2971 } 2972 if (ret && ret != -ENOTSUP) { 2973 goto out; 2974 } 2975 2976 offset += num; 2977 bytes -= num; 2978 } 2979 ret = 0; 2980 out: 2981 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 2982 tracked_request_end(&req); 2983 bdrv_dec_in_flight(bs); 2984 return ret; 2985 } 2986 2987 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes) 2988 { 2989 Coroutine *co; 2990 DiscardCo rwco = { 2991 .child = child, 2992 .offset = offset, 2993 .bytes = bytes, 2994 .ret = NOT_DONE, 2995 }; 2996 2997 if (qemu_in_coroutine()) { 2998 /* Fast-path if already in coroutine context */ 2999 bdrv_pdiscard_co_entry(&rwco); 3000 } else { 3001 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco); 3002 bdrv_coroutine_enter(child->bs, co); 3003 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE); 3004 } 3005 3006 return rwco.ret; 3007 } 3008 3009 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3010 { 3011 BlockDriver *drv = bs->drv; 3012 CoroutineIOCompletion co = { 3013 .coroutine = qemu_coroutine_self(), 3014 }; 3015 BlockAIOCB *acb; 3016 3017 bdrv_inc_in_flight(bs); 3018 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3019 co.ret = -ENOTSUP; 3020 goto out; 3021 } 3022 3023 if (drv->bdrv_co_ioctl) { 3024 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3025 } else { 3026 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3027 if (!acb) { 3028 co.ret = -ENOTSUP; 3029 goto out; 3030 } 3031 qemu_coroutine_yield(); 3032 } 3033 out: 3034 bdrv_dec_in_flight(bs); 3035 return co.ret; 3036 } 3037 3038 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3039 { 3040 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3041 } 3042 3043 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3044 { 3045 return memset(qemu_blockalign(bs, size), 0, size); 3046 } 3047 3048 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3049 { 3050 size_t align = bdrv_opt_mem_align(bs); 3051 3052 /* Ensure that NULL is never returned on success */ 3053 assert(align > 0); 3054 if (size == 0) { 3055 size = align; 3056 } 3057 3058 return qemu_try_memalign(align, size); 3059 } 3060 3061 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3062 { 3063 void *mem = qemu_try_blockalign(bs, size); 3064 3065 if (mem) { 3066 memset(mem, 0, size); 3067 } 3068 3069 return mem; 3070 } 3071 3072 /* 3073 * Check if all memory in this vector is sector aligned. 3074 */ 3075 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 3076 { 3077 int i; 3078 size_t alignment = bdrv_min_mem_align(bs); 3079 3080 for (i = 0; i < qiov->niov; i++) { 3081 if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 3082 return false; 3083 } 3084 if (qiov->iov[i].iov_len % alignment) { 3085 return false; 3086 } 3087 } 3088 3089 return true; 3090 } 3091 3092 void bdrv_add_before_write_notifier(BlockDriverState *bs, 3093 NotifierWithReturn *notifier) 3094 { 3095 notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 3096 } 3097 3098 void bdrv_io_plug(BlockDriverState *bs) 3099 { 3100 BdrvChild *child; 3101 3102 QLIST_FOREACH(child, &bs->children, next) { 3103 bdrv_io_plug(child->bs); 3104 } 3105 3106 if (atomic_fetch_inc(&bs->io_plugged) == 0) { 3107 BlockDriver *drv = bs->drv; 3108 if (drv && drv->bdrv_io_plug) { 3109 drv->bdrv_io_plug(bs); 3110 } 3111 } 3112 } 3113 3114 void bdrv_io_unplug(BlockDriverState *bs) 3115 { 3116 BdrvChild *child; 3117 3118 assert(bs->io_plugged); 3119 if (atomic_fetch_dec(&bs->io_plugged) == 1) { 3120 BlockDriver *drv = bs->drv; 3121 if (drv && drv->bdrv_io_unplug) { 3122 drv->bdrv_io_unplug(bs); 3123 } 3124 } 3125 3126 QLIST_FOREACH(child, &bs->children, next) { 3127 bdrv_io_unplug(child->bs); 3128 } 3129 } 3130 3131 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size) 3132 { 3133 BdrvChild *child; 3134 3135 if (bs->drv && bs->drv->bdrv_register_buf) { 3136 bs->drv->bdrv_register_buf(bs, host, size); 3137 } 3138 QLIST_FOREACH(child, &bs->children, next) { 3139 bdrv_register_buf(child->bs, host, size); 3140 } 3141 } 3142 3143 void bdrv_unregister_buf(BlockDriverState *bs, void *host) 3144 { 3145 BdrvChild *child; 3146 3147 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3148 bs->drv->bdrv_unregister_buf(bs, host); 3149 } 3150 QLIST_FOREACH(child, &bs->children, next) { 3151 bdrv_unregister_buf(child->bs, host); 3152 } 3153 } 3154 3155 static int coroutine_fn bdrv_co_copy_range_internal( 3156 BdrvChild *src, uint64_t src_offset, BdrvChild *dst, 3157 uint64_t dst_offset, uint64_t bytes, 3158 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3159 bool recurse_src) 3160 { 3161 BdrvTrackedRequest req; 3162 int ret; 3163 3164 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3165 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3166 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3167 3168 if (!dst || !dst->bs) { 3169 return -ENOMEDIUM; 3170 } 3171 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes); 3172 if (ret) { 3173 return ret; 3174 } 3175 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3176 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3177 } 3178 3179 if (!src || !src->bs) { 3180 return -ENOMEDIUM; 3181 } 3182 ret = bdrv_check_byte_request(src->bs, src_offset, bytes); 3183 if (ret) { 3184 return ret; 3185 } 3186 3187 if (!src->bs->drv->bdrv_co_copy_range_from 3188 || !dst->bs->drv->bdrv_co_copy_range_to 3189 || src->bs->encrypted || dst->bs->encrypted) { 3190 return -ENOTSUP; 3191 } 3192 3193 if (recurse_src) { 3194 bdrv_inc_in_flight(src->bs); 3195 tracked_request_begin(&req, src->bs, src_offset, bytes, 3196 BDRV_TRACKED_READ); 3197 3198 /* BDRV_REQ_SERIALISING is only for write operation */ 3199 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3200 if (!(read_flags & BDRV_REQ_NO_SERIALISING)) { 3201 wait_serialising_requests(&req); 3202 } 3203 3204 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3205 src, src_offset, 3206 dst, dst_offset, 3207 bytes, 3208 read_flags, write_flags); 3209 3210 tracked_request_end(&req); 3211 bdrv_dec_in_flight(src->bs); 3212 } else { 3213 bdrv_inc_in_flight(dst->bs); 3214 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3215 BDRV_TRACKED_WRITE); 3216 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3217 write_flags); 3218 if (!ret) { 3219 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3220 src, src_offset, 3221 dst, dst_offset, 3222 bytes, 3223 read_flags, write_flags); 3224 } 3225 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3226 tracked_request_end(&req); 3227 bdrv_dec_in_flight(dst->bs); 3228 } 3229 3230 return ret; 3231 } 3232 3233 /* Copy range from @src to @dst. 3234 * 3235 * See the comment of bdrv_co_copy_range for the parameter and return value 3236 * semantics. */ 3237 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset, 3238 BdrvChild *dst, uint64_t dst_offset, 3239 uint64_t bytes, 3240 BdrvRequestFlags read_flags, 3241 BdrvRequestFlags write_flags) 3242 { 3243 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3244 read_flags, write_flags); 3245 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3246 bytes, read_flags, write_flags, true); 3247 } 3248 3249 /* Copy range from @src to @dst. 3250 * 3251 * See the comment of bdrv_co_copy_range for the parameter and return value 3252 * semantics. */ 3253 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset, 3254 BdrvChild *dst, uint64_t dst_offset, 3255 uint64_t bytes, 3256 BdrvRequestFlags read_flags, 3257 BdrvRequestFlags write_flags) 3258 { 3259 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3260 read_flags, write_flags); 3261 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3262 bytes, read_flags, write_flags, false); 3263 } 3264 3265 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset, 3266 BdrvChild *dst, uint64_t dst_offset, 3267 uint64_t bytes, BdrvRequestFlags read_flags, 3268 BdrvRequestFlags write_flags) 3269 { 3270 return bdrv_co_copy_range_from(src, src_offset, 3271 dst, dst_offset, 3272 bytes, read_flags, write_flags); 3273 } 3274 3275 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3276 { 3277 BdrvChild *c; 3278 QLIST_FOREACH(c, &bs->parents, next_parent) { 3279 if (c->role->resize) { 3280 c->role->resize(c); 3281 } 3282 } 3283 } 3284 3285 /** 3286 * Truncate file to 'offset' bytes (needed only for file protocols) 3287 */ 3288 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, 3289 PreallocMode prealloc, Error **errp) 3290 { 3291 BlockDriverState *bs = child->bs; 3292 BlockDriver *drv = bs->drv; 3293 BdrvTrackedRequest req; 3294 int64_t old_size, new_bytes; 3295 int ret; 3296 3297 3298 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3299 if (!drv) { 3300 error_setg(errp, "No medium inserted"); 3301 return -ENOMEDIUM; 3302 } 3303 if (offset < 0) { 3304 error_setg(errp, "Image size cannot be negative"); 3305 return -EINVAL; 3306 } 3307 3308 old_size = bdrv_getlength(bs); 3309 if (old_size < 0) { 3310 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3311 return old_size; 3312 } 3313 3314 if (offset > old_size) { 3315 new_bytes = offset - old_size; 3316 } else { 3317 new_bytes = 0; 3318 } 3319 3320 bdrv_inc_in_flight(bs); 3321 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3322 BDRV_TRACKED_TRUNCATE); 3323 3324 /* If we are growing the image and potentially using preallocation for the 3325 * new area, we need to make sure that no write requests are made to it 3326 * concurrently or they might be overwritten by preallocation. */ 3327 if (new_bytes) { 3328 mark_request_serialising(&req, 1); 3329 } 3330 if (bs->read_only) { 3331 error_setg(errp, "Image is read-only"); 3332 ret = -EACCES; 3333 goto out; 3334 } 3335 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3336 0); 3337 if (ret < 0) { 3338 error_setg_errno(errp, -ret, 3339 "Failed to prepare request for truncation"); 3340 goto out; 3341 } 3342 3343 if (!drv->bdrv_co_truncate) { 3344 if (bs->file && drv->is_filter) { 3345 ret = bdrv_co_truncate(bs->file, offset, prealloc, errp); 3346 goto out; 3347 } 3348 error_setg(errp, "Image format driver does not support resize"); 3349 ret = -ENOTSUP; 3350 goto out; 3351 } 3352 3353 ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp); 3354 if (ret < 0) { 3355 goto out; 3356 } 3357 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3358 if (ret < 0) { 3359 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3360 } else { 3361 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3362 } 3363 /* It's possible that truncation succeeded but refresh_total_sectors 3364 * failed, but the latter doesn't affect how we should finish the request. 3365 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */ 3366 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3367 3368 out: 3369 tracked_request_end(&req); 3370 bdrv_dec_in_flight(bs); 3371 3372 return ret; 3373 } 3374 3375 typedef struct TruncateCo { 3376 BdrvChild *child; 3377 int64_t offset; 3378 PreallocMode prealloc; 3379 Error **errp; 3380 int ret; 3381 } TruncateCo; 3382 3383 static void coroutine_fn bdrv_truncate_co_entry(void *opaque) 3384 { 3385 TruncateCo *tco = opaque; 3386 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc, 3387 tco->errp); 3388 aio_wait_kick(); 3389 } 3390 3391 int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc, 3392 Error **errp) 3393 { 3394 Coroutine *co; 3395 TruncateCo tco = { 3396 .child = child, 3397 .offset = offset, 3398 .prealloc = prealloc, 3399 .errp = errp, 3400 .ret = NOT_DONE, 3401 }; 3402 3403 if (qemu_in_coroutine()) { 3404 /* Fast-path if already in coroutine context */ 3405 bdrv_truncate_co_entry(&tco); 3406 } else { 3407 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco); 3408 bdrv_coroutine_enter(child->bs, co); 3409 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE); 3410 } 3411 3412 return tco.ret; 3413 } 3414