1 /* 2 * Block layer I/O functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "trace.h" 27 #include "sysemu/block-backend.h" 28 #include "block/aio-wait.h" 29 #include "block/blockjob.h" 30 #include "block/blockjob_int.h" 31 #include "block/block_int.h" 32 #include "block/coroutines.h" 33 #include "block/dirty-bitmap.h" 34 #include "block/write-threshold.h" 35 #include "qemu/cutils.h" 36 #include "qemu/memalign.h" 37 #include "qapi/error.h" 38 #include "qemu/error-report.h" 39 #include "qemu/main-loop.h" 40 #include "sysemu/replay.h" 41 42 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */ 43 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) 44 45 static void bdrv_parent_cb_resize(BlockDriverState *bs); 46 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 47 int64_t offset, int64_t bytes, BdrvRequestFlags flags); 48 49 static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore) 50 { 51 BdrvChild *c, *next; 52 53 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 54 if (c == ignore) { 55 continue; 56 } 57 bdrv_parent_drained_begin_single(c); 58 } 59 } 60 61 void bdrv_parent_drained_end_single(BdrvChild *c) 62 { 63 IO_OR_GS_CODE(); 64 65 assert(c->quiesced_parent); 66 c->quiesced_parent = false; 67 68 if (c->klass->drained_end) { 69 c->klass->drained_end(c); 70 } 71 } 72 73 static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore) 74 { 75 BdrvChild *c; 76 77 QLIST_FOREACH(c, &bs->parents, next_parent) { 78 if (c == ignore) { 79 continue; 80 } 81 bdrv_parent_drained_end_single(c); 82 } 83 } 84 85 bool bdrv_parent_drained_poll_single(BdrvChild *c) 86 { 87 if (c->klass->drained_poll) { 88 return c->klass->drained_poll(c); 89 } 90 return false; 91 } 92 93 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, 94 bool ignore_bds_parents) 95 { 96 BdrvChild *c, *next; 97 bool busy = false; 98 99 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 100 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { 101 continue; 102 } 103 busy |= bdrv_parent_drained_poll_single(c); 104 } 105 106 return busy; 107 } 108 109 void bdrv_parent_drained_begin_single(BdrvChild *c) 110 { 111 IO_OR_GS_CODE(); 112 113 assert(!c->quiesced_parent); 114 c->quiesced_parent = true; 115 116 if (c->klass->drained_begin) { 117 c->klass->drained_begin(c); 118 } 119 } 120 121 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) 122 { 123 dst->pdiscard_alignment = MAX(dst->pdiscard_alignment, 124 src->pdiscard_alignment); 125 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer); 126 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer); 127 dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer, 128 src->max_hw_transfer); 129 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment, 130 src->opt_mem_alignment); 131 dst->min_mem_alignment = MAX(dst->min_mem_alignment, 132 src->min_mem_alignment); 133 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); 134 dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov); 135 } 136 137 typedef struct BdrvRefreshLimitsState { 138 BlockDriverState *bs; 139 BlockLimits old_bl; 140 } BdrvRefreshLimitsState; 141 142 static void bdrv_refresh_limits_abort(void *opaque) 143 { 144 BdrvRefreshLimitsState *s = opaque; 145 146 s->bs->bl = s->old_bl; 147 } 148 149 static TransactionActionDrv bdrv_refresh_limits_drv = { 150 .abort = bdrv_refresh_limits_abort, 151 .clean = g_free, 152 }; 153 154 /* @tran is allowed to be NULL, in this case no rollback is possible. */ 155 void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) 156 { 157 ERRP_GUARD(); 158 BlockDriver *drv = bs->drv; 159 BdrvChild *c; 160 bool have_limits; 161 162 GLOBAL_STATE_CODE(); 163 assume_graph_lock(); /* FIXME */ 164 165 if (tran) { 166 BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1); 167 *s = (BdrvRefreshLimitsState) { 168 .bs = bs, 169 .old_bl = bs->bl, 170 }; 171 tran_add(tran, &bdrv_refresh_limits_drv, s); 172 } 173 174 memset(&bs->bl, 0, sizeof(bs->bl)); 175 176 if (!drv) { 177 return; 178 } 179 180 /* Default alignment based on whether driver has byte interface */ 181 bs->bl.request_alignment = (drv->bdrv_co_preadv || 182 drv->bdrv_aio_preadv || 183 drv->bdrv_co_preadv_part) ? 1 : 512; 184 185 /* Take some limits from the children as a default */ 186 have_limits = false; 187 QLIST_FOREACH(c, &bs->children, next) { 188 if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW)) 189 { 190 bdrv_merge_limits(&bs->bl, &c->bs->bl); 191 have_limits = true; 192 } 193 } 194 195 if (!have_limits) { 196 bs->bl.min_mem_alignment = 512; 197 bs->bl.opt_mem_alignment = qemu_real_host_page_size(); 198 199 /* Safe default since most protocols use readv()/writev()/etc */ 200 bs->bl.max_iov = IOV_MAX; 201 } 202 203 /* Then let the driver override it */ 204 if (drv->bdrv_refresh_limits) { 205 drv->bdrv_refresh_limits(bs, errp); 206 if (*errp) { 207 return; 208 } 209 } 210 211 if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) { 212 error_setg(errp, "Driver requires too large request alignment"); 213 } 214 } 215 216 /** 217 * The copy-on-read flag is actually a reference count so multiple users may 218 * use the feature without worrying about clobbering its previous state. 219 * Copy-on-read stays enabled until all users have called to disable it. 220 */ 221 void bdrv_enable_copy_on_read(BlockDriverState *bs) 222 { 223 IO_CODE(); 224 qatomic_inc(&bs->copy_on_read); 225 } 226 227 void bdrv_disable_copy_on_read(BlockDriverState *bs) 228 { 229 int old = qatomic_fetch_dec(&bs->copy_on_read); 230 IO_CODE(); 231 assert(old >= 1); 232 } 233 234 typedef struct { 235 Coroutine *co; 236 BlockDriverState *bs; 237 bool done; 238 bool begin; 239 bool poll; 240 BdrvChild *parent; 241 } BdrvCoDrainData; 242 243 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ 244 bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, 245 bool ignore_bds_parents) 246 { 247 IO_OR_GS_CODE(); 248 249 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { 250 return true; 251 } 252 253 if (qatomic_read(&bs->in_flight)) { 254 return true; 255 } 256 257 return false; 258 } 259 260 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, 261 BdrvChild *ignore_parent) 262 { 263 return bdrv_drain_poll(bs, ignore_parent, false); 264 } 265 266 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, 267 bool poll); 268 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent); 269 270 static void bdrv_co_drain_bh_cb(void *opaque) 271 { 272 BdrvCoDrainData *data = opaque; 273 Coroutine *co = data->co; 274 BlockDriverState *bs = data->bs; 275 276 if (bs) { 277 AioContext *ctx = bdrv_get_aio_context(bs); 278 aio_context_acquire(ctx); 279 bdrv_dec_in_flight(bs); 280 if (data->begin) { 281 bdrv_do_drained_begin(bs, data->parent, data->poll); 282 } else { 283 assert(!data->poll); 284 bdrv_do_drained_end(bs, data->parent); 285 } 286 aio_context_release(ctx); 287 } else { 288 assert(data->begin); 289 bdrv_drain_all_begin(); 290 } 291 292 data->done = true; 293 aio_co_wake(co); 294 } 295 296 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, 297 bool begin, 298 BdrvChild *parent, 299 bool poll) 300 { 301 BdrvCoDrainData data; 302 Coroutine *self = qemu_coroutine_self(); 303 AioContext *ctx = bdrv_get_aio_context(bs); 304 AioContext *co_ctx = qemu_coroutine_get_aio_context(self); 305 306 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and 307 * other coroutines run if they were queued by aio_co_enter(). */ 308 309 assert(qemu_in_coroutine()); 310 data = (BdrvCoDrainData) { 311 .co = self, 312 .bs = bs, 313 .done = false, 314 .begin = begin, 315 .parent = parent, 316 .poll = poll, 317 }; 318 319 if (bs) { 320 bdrv_inc_in_flight(bs); 321 } 322 323 /* 324 * Temporarily drop the lock across yield or we would get deadlocks. 325 * bdrv_co_drain_bh_cb() reaquires the lock as needed. 326 * 327 * When we yield below, the lock for the current context will be 328 * released, so if this is actually the lock that protects bs, don't drop 329 * it a second time. 330 */ 331 if (ctx != co_ctx) { 332 aio_context_release(ctx); 333 } 334 replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data); 335 336 qemu_coroutine_yield(); 337 /* If we are resumed from some other event (such as an aio completion or a 338 * timer callback), it is a bug in the caller that should be fixed. */ 339 assert(data.done); 340 341 /* Reaquire the AioContext of bs if we dropped it */ 342 if (ctx != co_ctx) { 343 aio_context_acquire(ctx); 344 } 345 } 346 347 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, 348 bool poll) 349 { 350 IO_OR_GS_CODE(); 351 352 if (qemu_in_coroutine()) { 353 bdrv_co_yield_to_drain(bs, true, parent, poll); 354 return; 355 } 356 357 /* Stop things in parent-to-child order */ 358 if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { 359 aio_disable_external(bdrv_get_aio_context(bs)); 360 bdrv_parent_drained_begin(bs, parent); 361 if (bs->drv && bs->drv->bdrv_drain_begin) { 362 bs->drv->bdrv_drain_begin(bs); 363 } 364 } 365 366 /* 367 * Wait for drained requests to finish. 368 * 369 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The 370 * call is needed so things in this AioContext can make progress even 371 * though we don't return to the main AioContext loop - this automatically 372 * includes other nodes in the same AioContext and therefore all child 373 * nodes. 374 */ 375 if (poll) { 376 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent)); 377 } 378 } 379 380 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent) 381 { 382 bdrv_do_drained_begin(bs, parent, false); 383 } 384 385 void bdrv_drained_begin(BlockDriverState *bs) 386 { 387 IO_OR_GS_CODE(); 388 bdrv_do_drained_begin(bs, NULL, true); 389 } 390 391 /** 392 * This function does not poll, nor must any of its recursively called 393 * functions. 394 */ 395 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) 396 { 397 int old_quiesce_counter; 398 399 if (qemu_in_coroutine()) { 400 bdrv_co_yield_to_drain(bs, false, parent, false); 401 return; 402 } 403 assert(bs->quiesce_counter > 0); 404 405 /* Re-enable things in child-to-parent order */ 406 old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); 407 if (old_quiesce_counter == 1) { 408 if (bs->drv && bs->drv->bdrv_drain_end) { 409 bs->drv->bdrv_drain_end(bs); 410 } 411 bdrv_parent_drained_end(bs, parent); 412 aio_enable_external(bdrv_get_aio_context(bs)); 413 } 414 } 415 416 void bdrv_drained_end(BlockDriverState *bs) 417 { 418 IO_OR_GS_CODE(); 419 bdrv_do_drained_end(bs, NULL); 420 } 421 422 void bdrv_drain(BlockDriverState *bs) 423 { 424 IO_OR_GS_CODE(); 425 bdrv_drained_begin(bs); 426 bdrv_drained_end(bs); 427 } 428 429 static void bdrv_drain_assert_idle(BlockDriverState *bs) 430 { 431 BdrvChild *child, *next; 432 433 assert(qatomic_read(&bs->in_flight) == 0); 434 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 435 bdrv_drain_assert_idle(child->bs); 436 } 437 } 438 439 unsigned int bdrv_drain_all_count = 0; 440 441 static bool bdrv_drain_all_poll(void) 442 { 443 BlockDriverState *bs = NULL; 444 bool result = false; 445 GLOBAL_STATE_CODE(); 446 447 /* bdrv_drain_poll() can't make changes to the graph and we are holding the 448 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ 449 while ((bs = bdrv_next_all_states(bs))) { 450 AioContext *aio_context = bdrv_get_aio_context(bs); 451 aio_context_acquire(aio_context); 452 result |= bdrv_drain_poll(bs, NULL, true); 453 aio_context_release(aio_context); 454 } 455 456 return result; 457 } 458 459 /* 460 * Wait for pending requests to complete across all BlockDriverStates 461 * 462 * This function does not flush data to disk, use bdrv_flush_all() for that 463 * after calling this function. 464 * 465 * This pauses all block jobs and disables external clients. It must 466 * be paired with bdrv_drain_all_end(). 467 * 468 * NOTE: no new block jobs or BlockDriverStates can be created between 469 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls. 470 */ 471 void bdrv_drain_all_begin_nopoll(void) 472 { 473 BlockDriverState *bs = NULL; 474 GLOBAL_STATE_CODE(); 475 476 /* 477 * bdrv queue is managed by record/replay, 478 * waiting for finishing the I/O requests may 479 * be infinite 480 */ 481 if (replay_events_enabled()) { 482 return; 483 } 484 485 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main 486 * loop AioContext, so make sure we're in the main context. */ 487 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 488 assert(bdrv_drain_all_count < INT_MAX); 489 bdrv_drain_all_count++; 490 491 /* Quiesce all nodes, without polling in-flight requests yet. The graph 492 * cannot change during this loop. */ 493 while ((bs = bdrv_next_all_states(bs))) { 494 AioContext *aio_context = bdrv_get_aio_context(bs); 495 496 aio_context_acquire(aio_context); 497 bdrv_do_drained_begin(bs, NULL, false); 498 aio_context_release(aio_context); 499 } 500 } 501 502 void bdrv_drain_all_begin(void) 503 { 504 BlockDriverState *bs = NULL; 505 506 if (qemu_in_coroutine()) { 507 bdrv_co_yield_to_drain(NULL, true, NULL, true); 508 return; 509 } 510 511 /* 512 * bdrv queue is managed by record/replay, 513 * waiting for finishing the I/O requests may 514 * be infinite 515 */ 516 if (replay_events_enabled()) { 517 return; 518 } 519 520 bdrv_drain_all_begin_nopoll(); 521 522 /* Now poll the in-flight requests */ 523 AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll()); 524 525 while ((bs = bdrv_next_all_states(bs))) { 526 bdrv_drain_assert_idle(bs); 527 } 528 } 529 530 void bdrv_drain_all_end_quiesce(BlockDriverState *bs) 531 { 532 GLOBAL_STATE_CODE(); 533 534 g_assert(bs->quiesce_counter > 0); 535 g_assert(!bs->refcnt); 536 537 while (bs->quiesce_counter) { 538 bdrv_do_drained_end(bs, NULL); 539 } 540 } 541 542 void bdrv_drain_all_end(void) 543 { 544 BlockDriverState *bs = NULL; 545 GLOBAL_STATE_CODE(); 546 547 /* 548 * bdrv queue is managed by record/replay, 549 * waiting for finishing the I/O requests may 550 * be endless 551 */ 552 if (replay_events_enabled()) { 553 return; 554 } 555 556 while ((bs = bdrv_next_all_states(bs))) { 557 AioContext *aio_context = bdrv_get_aio_context(bs); 558 559 aio_context_acquire(aio_context); 560 bdrv_do_drained_end(bs, NULL); 561 aio_context_release(aio_context); 562 } 563 564 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 565 assert(bdrv_drain_all_count > 0); 566 bdrv_drain_all_count--; 567 } 568 569 void bdrv_drain_all(void) 570 { 571 GLOBAL_STATE_CODE(); 572 bdrv_drain_all_begin(); 573 bdrv_drain_all_end(); 574 } 575 576 /** 577 * Remove an active request from the tracked requests list 578 * 579 * This function should be called when a tracked request is completing. 580 */ 581 static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req) 582 { 583 if (req->serialising) { 584 qatomic_dec(&req->bs->serialising_in_flight); 585 } 586 587 qemu_co_mutex_lock(&req->bs->reqs_lock); 588 QLIST_REMOVE(req, list); 589 qemu_co_queue_restart_all(&req->wait_queue); 590 qemu_co_mutex_unlock(&req->bs->reqs_lock); 591 } 592 593 /** 594 * Add an active request to the tracked requests list 595 */ 596 static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req, 597 BlockDriverState *bs, 598 int64_t offset, 599 int64_t bytes, 600 enum BdrvTrackedRequestType type) 601 { 602 bdrv_check_request(offset, bytes, &error_abort); 603 604 *req = (BdrvTrackedRequest){ 605 .bs = bs, 606 .offset = offset, 607 .bytes = bytes, 608 .type = type, 609 .co = qemu_coroutine_self(), 610 .serialising = false, 611 .overlap_offset = offset, 612 .overlap_bytes = bytes, 613 }; 614 615 qemu_co_queue_init(&req->wait_queue); 616 617 qemu_co_mutex_lock(&bs->reqs_lock); 618 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 619 qemu_co_mutex_unlock(&bs->reqs_lock); 620 } 621 622 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 623 int64_t offset, int64_t bytes) 624 { 625 bdrv_check_request(offset, bytes, &error_abort); 626 627 /* aaaa bbbb */ 628 if (offset >= req->overlap_offset + req->overlap_bytes) { 629 return false; 630 } 631 /* bbbb aaaa */ 632 if (req->overlap_offset >= offset + bytes) { 633 return false; 634 } 635 return true; 636 } 637 638 /* Called with self->bs->reqs_lock held */ 639 static coroutine_fn BdrvTrackedRequest * 640 bdrv_find_conflicting_request(BdrvTrackedRequest *self) 641 { 642 BdrvTrackedRequest *req; 643 644 QLIST_FOREACH(req, &self->bs->tracked_requests, list) { 645 if (req == self || (!req->serialising && !self->serialising)) { 646 continue; 647 } 648 if (tracked_request_overlaps(req, self->overlap_offset, 649 self->overlap_bytes)) 650 { 651 /* 652 * Hitting this means there was a reentrant request, for 653 * example, a block driver issuing nested requests. This must 654 * never happen since it means deadlock. 655 */ 656 assert(qemu_coroutine_self() != req->co); 657 658 /* 659 * If the request is already (indirectly) waiting for us, or 660 * will wait for us as soon as it wakes up, then just go on 661 * (instead of producing a deadlock in the former case). 662 */ 663 if (!req->waiting_for) { 664 return req; 665 } 666 } 667 } 668 669 return NULL; 670 } 671 672 /* Called with self->bs->reqs_lock held */ 673 static void coroutine_fn 674 bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self) 675 { 676 BdrvTrackedRequest *req; 677 678 while ((req = bdrv_find_conflicting_request(self))) { 679 self->waiting_for = req; 680 qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); 681 self->waiting_for = NULL; 682 } 683 } 684 685 /* Called with req->bs->reqs_lock held */ 686 static void tracked_request_set_serialising(BdrvTrackedRequest *req, 687 uint64_t align) 688 { 689 int64_t overlap_offset = req->offset & ~(align - 1); 690 int64_t overlap_bytes = 691 ROUND_UP(req->offset + req->bytes, align) - overlap_offset; 692 693 bdrv_check_request(req->offset, req->bytes, &error_abort); 694 695 if (!req->serialising) { 696 qatomic_inc(&req->bs->serialising_in_flight); 697 req->serialising = true; 698 } 699 700 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 701 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 702 } 703 704 /** 705 * Return the tracked request on @bs for the current coroutine, or 706 * NULL if there is none. 707 */ 708 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) 709 { 710 BdrvTrackedRequest *req; 711 Coroutine *self = qemu_coroutine_self(); 712 IO_CODE(); 713 714 QLIST_FOREACH(req, &bs->tracked_requests, list) { 715 if (req->co == self) { 716 return req; 717 } 718 } 719 720 return NULL; 721 } 722 723 /** 724 * Round a region to cluster boundaries 725 */ 726 void coroutine_fn bdrv_round_to_clusters(BlockDriverState *bs, 727 int64_t offset, int64_t bytes, 728 int64_t *cluster_offset, 729 int64_t *cluster_bytes) 730 { 731 BlockDriverInfo bdi; 732 IO_CODE(); 733 if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 734 *cluster_offset = offset; 735 *cluster_bytes = bytes; 736 } else { 737 int64_t c = bdi.cluster_size; 738 *cluster_offset = QEMU_ALIGN_DOWN(offset, c); 739 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); 740 } 741 } 742 743 static coroutine_fn int bdrv_get_cluster_size(BlockDriverState *bs) 744 { 745 BlockDriverInfo bdi; 746 int ret; 747 748 ret = bdrv_co_get_info(bs, &bdi); 749 if (ret < 0 || bdi.cluster_size == 0) { 750 return bs->bl.request_alignment; 751 } else { 752 return bdi.cluster_size; 753 } 754 } 755 756 void bdrv_inc_in_flight(BlockDriverState *bs) 757 { 758 IO_CODE(); 759 qatomic_inc(&bs->in_flight); 760 } 761 762 void bdrv_wakeup(BlockDriverState *bs) 763 { 764 IO_CODE(); 765 aio_wait_kick(); 766 } 767 768 void bdrv_dec_in_flight(BlockDriverState *bs) 769 { 770 IO_CODE(); 771 qatomic_dec(&bs->in_flight); 772 bdrv_wakeup(bs); 773 } 774 775 static void coroutine_fn 776 bdrv_wait_serialising_requests(BdrvTrackedRequest *self) 777 { 778 BlockDriverState *bs = self->bs; 779 780 if (!qatomic_read(&bs->serialising_in_flight)) { 781 return; 782 } 783 784 qemu_co_mutex_lock(&bs->reqs_lock); 785 bdrv_wait_serialising_requests_locked(self); 786 qemu_co_mutex_unlock(&bs->reqs_lock); 787 } 788 789 void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, 790 uint64_t align) 791 { 792 IO_CODE(); 793 794 qemu_co_mutex_lock(&req->bs->reqs_lock); 795 796 tracked_request_set_serialising(req, align); 797 bdrv_wait_serialising_requests_locked(req); 798 799 qemu_co_mutex_unlock(&req->bs->reqs_lock); 800 } 801 802 int bdrv_check_qiov_request(int64_t offset, int64_t bytes, 803 QEMUIOVector *qiov, size_t qiov_offset, 804 Error **errp) 805 { 806 /* 807 * Check generic offset/bytes correctness 808 */ 809 810 if (offset < 0) { 811 error_setg(errp, "offset is negative: %" PRIi64, offset); 812 return -EIO; 813 } 814 815 if (bytes < 0) { 816 error_setg(errp, "bytes is negative: %" PRIi64, bytes); 817 return -EIO; 818 } 819 820 if (bytes > BDRV_MAX_LENGTH) { 821 error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")", 822 bytes, BDRV_MAX_LENGTH); 823 return -EIO; 824 } 825 826 if (offset > BDRV_MAX_LENGTH) { 827 error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")", 828 offset, BDRV_MAX_LENGTH); 829 return -EIO; 830 } 831 832 if (offset > BDRV_MAX_LENGTH - bytes) { 833 error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") " 834 "exceeds maximum(%" PRIi64 ")", offset, bytes, 835 BDRV_MAX_LENGTH); 836 return -EIO; 837 } 838 839 if (!qiov) { 840 return 0; 841 } 842 843 /* 844 * Check qiov and qiov_offset 845 */ 846 847 if (qiov_offset > qiov->size) { 848 error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)", 849 qiov_offset, qiov->size); 850 return -EIO; 851 } 852 853 if (bytes > qiov->size - qiov_offset) { 854 error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io " 855 "vector size(%zu)", bytes, qiov_offset, qiov->size); 856 return -EIO; 857 } 858 859 return 0; 860 } 861 862 int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp) 863 { 864 return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp); 865 } 866 867 static int bdrv_check_request32(int64_t offset, int64_t bytes, 868 QEMUIOVector *qiov, size_t qiov_offset) 869 { 870 int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); 871 if (ret < 0) { 872 return ret; 873 } 874 875 if (bytes > BDRV_REQUEST_MAX_BYTES) { 876 return -EIO; 877 } 878 879 return 0; 880 } 881 882 /* 883 * Completely zero out a block device with the help of bdrv_pwrite_zeroes. 884 * The operation is sped up by checking the block status and only writing 885 * zeroes to the device if they currently do not return zeroes. Optional 886 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP, 887 * BDRV_REQ_FUA). 888 * 889 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite(). 890 */ 891 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags) 892 { 893 int ret; 894 int64_t target_size, bytes, offset = 0; 895 BlockDriverState *bs = child->bs; 896 IO_CODE(); 897 898 target_size = bdrv_getlength(bs); 899 if (target_size < 0) { 900 return target_size; 901 } 902 903 for (;;) { 904 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES); 905 if (bytes <= 0) { 906 return 0; 907 } 908 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL); 909 if (ret < 0) { 910 return ret; 911 } 912 if (ret & BDRV_BLOCK_ZERO) { 913 offset += bytes; 914 continue; 915 } 916 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags); 917 if (ret < 0) { 918 return ret; 919 } 920 offset += bytes; 921 } 922 } 923 924 /* 925 * Writes to the file and ensures that no writes are reordered across this 926 * request (acts as a barrier) 927 * 928 * Returns 0 on success, -errno in error cases. 929 */ 930 int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset, 931 int64_t bytes, const void *buf, 932 BdrvRequestFlags flags) 933 { 934 int ret; 935 IO_CODE(); 936 assert_bdrv_graph_readable(); 937 938 ret = bdrv_co_pwrite(child, offset, bytes, buf, flags); 939 if (ret < 0) { 940 return ret; 941 } 942 943 ret = bdrv_co_flush(child->bs); 944 if (ret < 0) { 945 return ret; 946 } 947 948 return 0; 949 } 950 951 typedef struct CoroutineIOCompletion { 952 Coroutine *coroutine; 953 int ret; 954 } CoroutineIOCompletion; 955 956 static void bdrv_co_io_em_complete(void *opaque, int ret) 957 { 958 CoroutineIOCompletion *co = opaque; 959 960 co->ret = ret; 961 aio_co_wake(co->coroutine); 962 } 963 964 static int coroutine_fn GRAPH_RDLOCK 965 bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, 966 QEMUIOVector *qiov, size_t qiov_offset, int flags) 967 { 968 BlockDriver *drv = bs->drv; 969 int64_t sector_num; 970 unsigned int nb_sectors; 971 QEMUIOVector local_qiov; 972 int ret; 973 assert_bdrv_graph_readable(); 974 975 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 976 assert(!(flags & ~bs->supported_read_flags)); 977 978 if (!drv) { 979 return -ENOMEDIUM; 980 } 981 982 if (drv->bdrv_co_preadv_part) { 983 return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset, 984 flags); 985 } 986 987 if (qiov_offset > 0 || bytes != qiov->size) { 988 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 989 qiov = &local_qiov; 990 } 991 992 if (drv->bdrv_co_preadv) { 993 ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); 994 goto out; 995 } 996 997 if (drv->bdrv_aio_preadv) { 998 BlockAIOCB *acb; 999 CoroutineIOCompletion co = { 1000 .coroutine = qemu_coroutine_self(), 1001 }; 1002 1003 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, 1004 bdrv_co_io_em_complete, &co); 1005 if (acb == NULL) { 1006 ret = -EIO; 1007 goto out; 1008 } else { 1009 qemu_coroutine_yield(); 1010 ret = co.ret; 1011 goto out; 1012 } 1013 } 1014 1015 sector_num = offset >> BDRV_SECTOR_BITS; 1016 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1017 1018 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1019 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1020 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1021 assert(drv->bdrv_co_readv); 1022 1023 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 1024 1025 out: 1026 if (qiov == &local_qiov) { 1027 qemu_iovec_destroy(&local_qiov); 1028 } 1029 1030 return ret; 1031 } 1032 1033 static int coroutine_fn GRAPH_RDLOCK 1034 bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, 1035 QEMUIOVector *qiov, size_t qiov_offset, 1036 BdrvRequestFlags flags) 1037 { 1038 BlockDriver *drv = bs->drv; 1039 bool emulate_fua = false; 1040 int64_t sector_num; 1041 unsigned int nb_sectors; 1042 QEMUIOVector local_qiov; 1043 int ret; 1044 assert_bdrv_graph_readable(); 1045 1046 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1047 1048 if (!drv) { 1049 return -ENOMEDIUM; 1050 } 1051 1052 if ((flags & BDRV_REQ_FUA) && 1053 (~bs->supported_write_flags & BDRV_REQ_FUA)) { 1054 flags &= ~BDRV_REQ_FUA; 1055 emulate_fua = true; 1056 } 1057 1058 flags &= bs->supported_write_flags; 1059 1060 if (drv->bdrv_co_pwritev_part) { 1061 ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 1062 flags); 1063 goto emulate_flags; 1064 } 1065 1066 if (qiov_offset > 0 || bytes != qiov->size) { 1067 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1068 qiov = &local_qiov; 1069 } 1070 1071 if (drv->bdrv_co_pwritev) { 1072 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags); 1073 goto emulate_flags; 1074 } 1075 1076 if (drv->bdrv_aio_pwritev) { 1077 BlockAIOCB *acb; 1078 CoroutineIOCompletion co = { 1079 .coroutine = qemu_coroutine_self(), 1080 }; 1081 1082 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags, 1083 bdrv_co_io_em_complete, &co); 1084 if (acb == NULL) { 1085 ret = -EIO; 1086 } else { 1087 qemu_coroutine_yield(); 1088 ret = co.ret; 1089 } 1090 goto emulate_flags; 1091 } 1092 1093 sector_num = offset >> BDRV_SECTOR_BITS; 1094 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1095 1096 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 1097 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 1098 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1099 1100 assert(drv->bdrv_co_writev); 1101 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags); 1102 1103 emulate_flags: 1104 if (ret == 0 && emulate_fua) { 1105 ret = bdrv_co_flush(bs); 1106 } 1107 1108 if (qiov == &local_qiov) { 1109 qemu_iovec_destroy(&local_qiov); 1110 } 1111 1112 return ret; 1113 } 1114 1115 static int coroutine_fn GRAPH_RDLOCK 1116 bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset, 1117 int64_t bytes, QEMUIOVector *qiov, 1118 size_t qiov_offset) 1119 { 1120 BlockDriver *drv = bs->drv; 1121 QEMUIOVector local_qiov; 1122 int ret; 1123 assert_bdrv_graph_readable(); 1124 1125 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1126 1127 if (!drv) { 1128 return -ENOMEDIUM; 1129 } 1130 1131 if (!block_driver_can_compress(drv)) { 1132 return -ENOTSUP; 1133 } 1134 1135 if (drv->bdrv_co_pwritev_compressed_part) { 1136 return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes, 1137 qiov, qiov_offset); 1138 } 1139 1140 if (qiov_offset == 0) { 1141 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); 1142 } 1143 1144 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); 1145 ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov); 1146 qemu_iovec_destroy(&local_qiov); 1147 1148 return ret; 1149 } 1150 1151 static int coroutine_fn GRAPH_RDLOCK 1152 bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, 1153 QEMUIOVector *qiov, size_t qiov_offset, int flags) 1154 { 1155 BlockDriverState *bs = child->bs; 1156 1157 /* Perform I/O through a temporary buffer so that users who scribble over 1158 * their read buffer while the operation is in progress do not end up 1159 * modifying the image file. This is critical for zero-copy guest I/O 1160 * where anything might happen inside guest memory. 1161 */ 1162 void *bounce_buffer = NULL; 1163 1164 BlockDriver *drv = bs->drv; 1165 int64_t cluster_offset; 1166 int64_t cluster_bytes; 1167 int64_t skip_bytes; 1168 int ret; 1169 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, 1170 BDRV_REQUEST_MAX_BYTES); 1171 int64_t progress = 0; 1172 bool skip_write; 1173 1174 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1175 1176 if (!drv) { 1177 return -ENOMEDIUM; 1178 } 1179 1180 /* 1181 * Do not write anything when the BDS is inactive. That is not 1182 * allowed, and it would not help. 1183 */ 1184 skip_write = (bs->open_flags & BDRV_O_INACTIVE); 1185 1186 /* FIXME We cannot require callers to have write permissions when all they 1187 * are doing is a read request. If we did things right, write permissions 1188 * would be obtained anyway, but internally by the copy-on-read code. As 1189 * long as it is implemented here rather than in a separate filter driver, 1190 * the copy-on-read code doesn't have its own BdrvChild, however, for which 1191 * it could request permissions. Therefore we have to bypass the permission 1192 * system for the moment. */ 1193 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1194 1195 /* Cover entire cluster so no additional backing file I/O is required when 1196 * allocating cluster in the image file. Note that this value may exceed 1197 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which 1198 * is one reason we loop rather than doing it all at once. 1199 */ 1200 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); 1201 skip_bytes = offset - cluster_offset; 1202 1203 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, 1204 cluster_offset, cluster_bytes); 1205 1206 while (cluster_bytes) { 1207 int64_t pnum; 1208 1209 if (skip_write) { 1210 ret = 1; /* "already allocated", so nothing will be copied */ 1211 pnum = MIN(cluster_bytes, max_transfer); 1212 } else { 1213 ret = bdrv_is_allocated(bs, cluster_offset, 1214 MIN(cluster_bytes, max_transfer), &pnum); 1215 if (ret < 0) { 1216 /* 1217 * Safe to treat errors in querying allocation as if 1218 * unallocated; we'll probably fail again soon on the 1219 * read, but at least that will set a decent errno. 1220 */ 1221 pnum = MIN(cluster_bytes, max_transfer); 1222 } 1223 1224 /* Stop at EOF if the image ends in the middle of the cluster */ 1225 if (ret == 0 && pnum == 0) { 1226 assert(progress >= bytes); 1227 break; 1228 } 1229 1230 assert(skip_bytes < pnum); 1231 } 1232 1233 if (ret <= 0) { 1234 QEMUIOVector local_qiov; 1235 1236 /* Must copy-on-read; use the bounce buffer */ 1237 pnum = MIN(pnum, MAX_BOUNCE_BUFFER); 1238 if (!bounce_buffer) { 1239 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum); 1240 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER); 1241 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed); 1242 1243 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len); 1244 if (!bounce_buffer) { 1245 ret = -ENOMEM; 1246 goto err; 1247 } 1248 } 1249 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum); 1250 1251 ret = bdrv_driver_preadv(bs, cluster_offset, pnum, 1252 &local_qiov, 0, 0); 1253 if (ret < 0) { 1254 goto err; 1255 } 1256 1257 bdrv_co_debug_event(bs, BLKDBG_COR_WRITE); 1258 if (drv->bdrv_co_pwrite_zeroes && 1259 buffer_is_zero(bounce_buffer, pnum)) { 1260 /* FIXME: Should we (perhaps conditionally) be setting 1261 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy 1262 * that still correctly reads as zero? */ 1263 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 1264 BDRV_REQ_WRITE_UNCHANGED); 1265 } else { 1266 /* This does not change the data on the disk, it is not 1267 * necessary to flush even in cache=writethrough mode. 1268 */ 1269 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum, 1270 &local_qiov, 0, 1271 BDRV_REQ_WRITE_UNCHANGED); 1272 } 1273 1274 if (ret < 0) { 1275 /* It might be okay to ignore write errors for guest 1276 * requests. If this is a deliberate copy-on-read 1277 * then we don't want to ignore the error. Simply 1278 * report it in all cases. 1279 */ 1280 goto err; 1281 } 1282 1283 if (!(flags & BDRV_REQ_PREFETCH)) { 1284 qemu_iovec_from_buf(qiov, qiov_offset + progress, 1285 bounce_buffer + skip_bytes, 1286 MIN(pnum - skip_bytes, bytes - progress)); 1287 } 1288 } else if (!(flags & BDRV_REQ_PREFETCH)) { 1289 /* Read directly into the destination */ 1290 ret = bdrv_driver_preadv(bs, offset + progress, 1291 MIN(pnum - skip_bytes, bytes - progress), 1292 qiov, qiov_offset + progress, 0); 1293 if (ret < 0) { 1294 goto err; 1295 } 1296 } 1297 1298 cluster_offset += pnum; 1299 cluster_bytes -= pnum; 1300 progress += pnum - skip_bytes; 1301 skip_bytes = 0; 1302 } 1303 ret = 0; 1304 1305 err: 1306 qemu_vfree(bounce_buffer); 1307 return ret; 1308 } 1309 1310 /* 1311 * Forwards an already correctly aligned request to the BlockDriver. This 1312 * handles copy on read, zeroing after EOF, and fragmentation of large 1313 * reads; any other features must be implemented by the caller. 1314 */ 1315 static int coroutine_fn GRAPH_RDLOCK 1316 bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req, 1317 int64_t offset, int64_t bytes, int64_t align, 1318 QEMUIOVector *qiov, size_t qiov_offset, int flags) 1319 { 1320 BlockDriverState *bs = child->bs; 1321 int64_t total_bytes, max_bytes; 1322 int ret = 0; 1323 int64_t bytes_remaining = bytes; 1324 int max_transfer; 1325 1326 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1327 assert(is_power_of_2(align)); 1328 assert((offset & (align - 1)) == 0); 1329 assert((bytes & (align - 1)) == 0); 1330 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1331 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1332 align); 1333 1334 /* 1335 * TODO: We would need a per-BDS .supported_read_flags and 1336 * potential fallback support, if we ever implement any read flags 1337 * to pass through to drivers. For now, there aren't any 1338 * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint. 1339 */ 1340 assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH | 1341 BDRV_REQ_REGISTERED_BUF))); 1342 1343 /* Handle Copy on Read and associated serialisation */ 1344 if (flags & BDRV_REQ_COPY_ON_READ) { 1345 /* If we touch the same cluster it counts as an overlap. This 1346 * guarantees that allocating writes will be serialized and not race 1347 * with each other for the same cluster. For example, in copy-on-read 1348 * it ensures that the CoR read and write operations are atomic and 1349 * guest writes cannot interleave between them. */ 1350 bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs)); 1351 } else { 1352 bdrv_wait_serialising_requests(req); 1353 } 1354 1355 if (flags & BDRV_REQ_COPY_ON_READ) { 1356 int64_t pnum; 1357 1358 /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */ 1359 flags &= ~BDRV_REQ_COPY_ON_READ; 1360 1361 ret = bdrv_is_allocated(bs, offset, bytes, &pnum); 1362 if (ret < 0) { 1363 goto out; 1364 } 1365 1366 if (!ret || pnum != bytes) { 1367 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, 1368 qiov, qiov_offset, flags); 1369 goto out; 1370 } else if (flags & BDRV_REQ_PREFETCH) { 1371 goto out; 1372 } 1373 } 1374 1375 /* Forward the request to the BlockDriver, possibly fragmenting it */ 1376 total_bytes = bdrv_getlength(bs); 1377 if (total_bytes < 0) { 1378 ret = total_bytes; 1379 goto out; 1380 } 1381 1382 assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF))); 1383 1384 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); 1385 if (bytes <= max_bytes && bytes <= max_transfer) { 1386 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags); 1387 goto out; 1388 } 1389 1390 while (bytes_remaining) { 1391 int64_t num; 1392 1393 if (max_bytes) { 1394 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer)); 1395 assert(num); 1396 1397 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, 1398 num, qiov, 1399 qiov_offset + bytes - bytes_remaining, 1400 flags); 1401 max_bytes -= num; 1402 } else { 1403 num = bytes_remaining; 1404 ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining, 1405 0, bytes_remaining); 1406 } 1407 if (ret < 0) { 1408 goto out; 1409 } 1410 bytes_remaining -= num; 1411 } 1412 1413 out: 1414 return ret < 0 ? ret : 0; 1415 } 1416 1417 /* 1418 * Request padding 1419 * 1420 * |<---- align ----->| |<----- align ---->| 1421 * |<- head ->|<------------- bytes ------------->|<-- tail -->| 1422 * | | | | | | 1423 * -*----------$-------*-------- ... --------*-----$------------*--- 1424 * | | | | | | 1425 * | offset | | end | 1426 * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end) 1427 * [buf ... ) [tail_buf ) 1428 * 1429 * @buf is an aligned allocation needed to store @head and @tail paddings. @head 1430 * is placed at the beginning of @buf and @tail at the @end. 1431 * 1432 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk 1433 * around tail, if tail exists. 1434 * 1435 * @merge_reads is true for small requests, 1436 * if @buf_len == @head + bytes + @tail. In this case it is possible that both 1437 * head and tail exist but @buf_len == align and @tail_buf == @buf. 1438 */ 1439 typedef struct BdrvRequestPadding { 1440 uint8_t *buf; 1441 size_t buf_len; 1442 uint8_t *tail_buf; 1443 size_t head; 1444 size_t tail; 1445 bool merge_reads; 1446 QEMUIOVector local_qiov; 1447 } BdrvRequestPadding; 1448 1449 static bool bdrv_init_padding(BlockDriverState *bs, 1450 int64_t offset, int64_t bytes, 1451 BdrvRequestPadding *pad) 1452 { 1453 int64_t align = bs->bl.request_alignment; 1454 int64_t sum; 1455 1456 bdrv_check_request(offset, bytes, &error_abort); 1457 assert(align <= INT_MAX); /* documented in block/block_int.h */ 1458 assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */ 1459 1460 memset(pad, 0, sizeof(*pad)); 1461 1462 pad->head = offset & (align - 1); 1463 pad->tail = ((offset + bytes) & (align - 1)); 1464 if (pad->tail) { 1465 pad->tail = align - pad->tail; 1466 } 1467 1468 if (!pad->head && !pad->tail) { 1469 return false; 1470 } 1471 1472 assert(bytes); /* Nothing good in aligning zero-length requests */ 1473 1474 sum = pad->head + bytes + pad->tail; 1475 pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align; 1476 pad->buf = qemu_blockalign(bs, pad->buf_len); 1477 pad->merge_reads = sum == pad->buf_len; 1478 if (pad->tail) { 1479 pad->tail_buf = pad->buf + pad->buf_len - align; 1480 } 1481 1482 return true; 1483 } 1484 1485 static int coroutine_fn GRAPH_RDLOCK 1486 bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req, 1487 BdrvRequestPadding *pad, bool zero_middle) 1488 { 1489 QEMUIOVector local_qiov; 1490 BlockDriverState *bs = child->bs; 1491 uint64_t align = bs->bl.request_alignment; 1492 int ret; 1493 1494 assert(req->serialising && pad->buf); 1495 1496 if (pad->head || pad->merge_reads) { 1497 int64_t bytes = pad->merge_reads ? pad->buf_len : align; 1498 1499 qemu_iovec_init_buf(&local_qiov, pad->buf, bytes); 1500 1501 if (pad->head) { 1502 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 1503 } 1504 if (pad->merge_reads && pad->tail) { 1505 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1506 } 1507 ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes, 1508 align, &local_qiov, 0, 0); 1509 if (ret < 0) { 1510 return ret; 1511 } 1512 if (pad->head) { 1513 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1514 } 1515 if (pad->merge_reads && pad->tail) { 1516 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1517 } 1518 1519 if (pad->merge_reads) { 1520 goto zero_mem; 1521 } 1522 } 1523 1524 if (pad->tail) { 1525 qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align); 1526 1527 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1528 ret = bdrv_aligned_preadv( 1529 child, req, 1530 req->overlap_offset + req->overlap_bytes - align, 1531 align, align, &local_qiov, 0, 0); 1532 if (ret < 0) { 1533 return ret; 1534 } 1535 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1536 } 1537 1538 zero_mem: 1539 if (zero_middle) { 1540 memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail); 1541 } 1542 1543 return 0; 1544 } 1545 1546 static void bdrv_padding_destroy(BdrvRequestPadding *pad) 1547 { 1548 if (pad->buf) { 1549 qemu_vfree(pad->buf); 1550 qemu_iovec_destroy(&pad->local_qiov); 1551 } 1552 memset(pad, 0, sizeof(*pad)); 1553 } 1554 1555 /* 1556 * bdrv_pad_request 1557 * 1558 * Exchange request parameters with padded request if needed. Don't include RMW 1559 * read of padding, bdrv_padding_rmw_read() should be called separately if 1560 * needed. 1561 * 1562 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out: 1563 * - on function start they represent original request 1564 * - on failure or when padding is not needed they are unchanged 1565 * - on success when padding is needed they represent padded request 1566 */ 1567 static int bdrv_pad_request(BlockDriverState *bs, 1568 QEMUIOVector **qiov, size_t *qiov_offset, 1569 int64_t *offset, int64_t *bytes, 1570 BdrvRequestPadding *pad, bool *padded, 1571 BdrvRequestFlags *flags) 1572 { 1573 int ret; 1574 1575 bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort); 1576 1577 if (!bdrv_init_padding(bs, *offset, *bytes, pad)) { 1578 if (padded) { 1579 *padded = false; 1580 } 1581 return 0; 1582 } 1583 1584 ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head, 1585 *qiov, *qiov_offset, *bytes, 1586 pad->buf + pad->buf_len - pad->tail, 1587 pad->tail); 1588 if (ret < 0) { 1589 bdrv_padding_destroy(pad); 1590 return ret; 1591 } 1592 *bytes += pad->head + pad->tail; 1593 *offset -= pad->head; 1594 *qiov = &pad->local_qiov; 1595 *qiov_offset = 0; 1596 if (padded) { 1597 *padded = true; 1598 } 1599 if (flags) { 1600 /* Can't use optimization hint with bounce buffer */ 1601 *flags &= ~BDRV_REQ_REGISTERED_BUF; 1602 } 1603 1604 return 0; 1605 } 1606 1607 int coroutine_fn bdrv_co_preadv(BdrvChild *child, 1608 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 1609 BdrvRequestFlags flags) 1610 { 1611 IO_CODE(); 1612 return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags); 1613 } 1614 1615 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, 1616 int64_t offset, int64_t bytes, 1617 QEMUIOVector *qiov, size_t qiov_offset, 1618 BdrvRequestFlags flags) 1619 { 1620 BlockDriverState *bs = child->bs; 1621 BdrvTrackedRequest req; 1622 BdrvRequestPadding pad; 1623 int ret; 1624 IO_CODE(); 1625 1626 trace_bdrv_co_preadv_part(bs, offset, bytes, flags); 1627 1628 if (!bdrv_co_is_inserted(bs)) { 1629 return -ENOMEDIUM; 1630 } 1631 1632 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 1633 if (ret < 0) { 1634 return ret; 1635 } 1636 1637 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 1638 /* 1639 * Aligning zero request is nonsense. Even if driver has special meaning 1640 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass 1641 * it to driver due to request_alignment. 1642 * 1643 * Still, no reason to return an error if someone do unaligned 1644 * zero-length read occasionally. 1645 */ 1646 return 0; 1647 } 1648 1649 bdrv_inc_in_flight(bs); 1650 1651 /* Don't do copy-on-read if we read data before write operation */ 1652 if (qatomic_read(&bs->copy_on_read)) { 1653 flags |= BDRV_REQ_COPY_ON_READ; 1654 } 1655 1656 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, 1657 NULL, &flags); 1658 if (ret < 0) { 1659 goto fail; 1660 } 1661 1662 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); 1663 ret = bdrv_aligned_preadv(child, &req, offset, bytes, 1664 bs->bl.request_alignment, 1665 qiov, qiov_offset, flags); 1666 tracked_request_end(&req); 1667 bdrv_padding_destroy(&pad); 1668 1669 fail: 1670 bdrv_dec_in_flight(bs); 1671 1672 return ret; 1673 } 1674 1675 static int coroutine_fn GRAPH_RDLOCK 1676 bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, 1677 BdrvRequestFlags flags) 1678 { 1679 BlockDriver *drv = bs->drv; 1680 QEMUIOVector qiov; 1681 void *buf = NULL; 1682 int ret = 0; 1683 bool need_flush = false; 1684 int head = 0; 1685 int tail = 0; 1686 1687 int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, 1688 INT64_MAX); 1689 int alignment = MAX(bs->bl.pwrite_zeroes_alignment, 1690 bs->bl.request_alignment); 1691 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); 1692 1693 assert_bdrv_graph_readable(); 1694 bdrv_check_request(offset, bytes, &error_abort); 1695 1696 if (!drv) { 1697 return -ENOMEDIUM; 1698 } 1699 1700 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { 1701 return -ENOTSUP; 1702 } 1703 1704 /* By definition there is no user buffer so this flag doesn't make sense */ 1705 if (flags & BDRV_REQ_REGISTERED_BUF) { 1706 return -EINVAL; 1707 } 1708 1709 /* Invalidate the cached block-status data range if this write overlaps */ 1710 bdrv_bsc_invalidate_range(bs, offset, bytes); 1711 1712 assert(alignment % bs->bl.request_alignment == 0); 1713 head = offset % alignment; 1714 tail = (offset + bytes) % alignment; 1715 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); 1716 assert(max_write_zeroes >= bs->bl.request_alignment); 1717 1718 while (bytes > 0 && !ret) { 1719 int64_t num = bytes; 1720 1721 /* Align request. Block drivers can expect the "bulk" of the request 1722 * to be aligned, and that unaligned requests do not cross cluster 1723 * boundaries. 1724 */ 1725 if (head) { 1726 /* Make a small request up to the first aligned sector. For 1727 * convenience, limit this request to max_transfer even if 1728 * we don't need to fall back to writes. */ 1729 num = MIN(MIN(bytes, max_transfer), alignment - head); 1730 head = (head + num) % alignment; 1731 assert(num < max_write_zeroes); 1732 } else if (tail && num > alignment) { 1733 /* Shorten the request to the last aligned sector. */ 1734 num -= tail; 1735 } 1736 1737 /* limit request size */ 1738 if (num > max_write_zeroes) { 1739 num = max_write_zeroes; 1740 } 1741 1742 ret = -ENOTSUP; 1743 /* First try the efficient write zeroes operation */ 1744 if (drv->bdrv_co_pwrite_zeroes) { 1745 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, 1746 flags & bs->supported_zero_flags); 1747 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && 1748 !(bs->supported_zero_flags & BDRV_REQ_FUA)) { 1749 need_flush = true; 1750 } 1751 } else { 1752 assert(!bs->supported_zero_flags); 1753 } 1754 1755 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) { 1756 /* Fall back to bounce buffer if write zeroes is unsupported */ 1757 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; 1758 1759 if ((flags & BDRV_REQ_FUA) && 1760 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1761 /* No need for bdrv_driver_pwrite() to do a fallback 1762 * flush on each chunk; use just one at the end */ 1763 write_flags &= ~BDRV_REQ_FUA; 1764 need_flush = true; 1765 } 1766 num = MIN(num, max_transfer); 1767 if (buf == NULL) { 1768 buf = qemu_try_blockalign0(bs, num); 1769 if (buf == NULL) { 1770 ret = -ENOMEM; 1771 goto fail; 1772 } 1773 } 1774 qemu_iovec_init_buf(&qiov, buf, num); 1775 1776 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags); 1777 1778 /* Keep bounce buffer around if it is big enough for all 1779 * all future requests. 1780 */ 1781 if (num < max_transfer) { 1782 qemu_vfree(buf); 1783 buf = NULL; 1784 } 1785 } 1786 1787 offset += num; 1788 bytes -= num; 1789 } 1790 1791 fail: 1792 if (ret == 0 && need_flush) { 1793 ret = bdrv_co_flush(bs); 1794 } 1795 qemu_vfree(buf); 1796 return ret; 1797 } 1798 1799 static inline int coroutine_fn 1800 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes, 1801 BdrvTrackedRequest *req, int flags) 1802 { 1803 BlockDriverState *bs = child->bs; 1804 1805 bdrv_check_request(offset, bytes, &error_abort); 1806 1807 if (bdrv_is_read_only(bs)) { 1808 return -EPERM; 1809 } 1810 1811 assert(!(bs->open_flags & BDRV_O_INACTIVE)); 1812 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1813 assert(!(flags & ~BDRV_REQ_MASK)); 1814 assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING))); 1815 1816 if (flags & BDRV_REQ_SERIALISING) { 1817 QEMU_LOCK_GUARD(&bs->reqs_lock); 1818 1819 tracked_request_set_serialising(req, bdrv_get_cluster_size(bs)); 1820 1821 if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) { 1822 return -EBUSY; 1823 } 1824 1825 bdrv_wait_serialising_requests_locked(req); 1826 } else { 1827 bdrv_wait_serialising_requests(req); 1828 } 1829 1830 assert(req->overlap_offset <= offset); 1831 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 1832 assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE || 1833 child->perm & BLK_PERM_RESIZE); 1834 1835 switch (req->type) { 1836 case BDRV_TRACKED_WRITE: 1837 case BDRV_TRACKED_DISCARD: 1838 if (flags & BDRV_REQ_WRITE_UNCHANGED) { 1839 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1840 } else { 1841 assert(child->perm & BLK_PERM_WRITE); 1842 } 1843 bdrv_write_threshold_check_write(bs, offset, bytes); 1844 return 0; 1845 case BDRV_TRACKED_TRUNCATE: 1846 assert(child->perm & BLK_PERM_RESIZE); 1847 return 0; 1848 default: 1849 abort(); 1850 } 1851 } 1852 1853 static inline void coroutine_fn 1854 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes, 1855 BdrvTrackedRequest *req, int ret) 1856 { 1857 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 1858 BlockDriverState *bs = child->bs; 1859 1860 bdrv_check_request(offset, bytes, &error_abort); 1861 1862 qatomic_inc(&bs->write_gen); 1863 1864 /* 1865 * Discard cannot extend the image, but in error handling cases, such as 1866 * when reverting a qcow2 cluster allocation, the discarded range can pass 1867 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD 1868 * here. Instead, just skip it, since semantically a discard request 1869 * beyond EOF cannot expand the image anyway. 1870 */ 1871 if (ret == 0 && 1872 (req->type == BDRV_TRACKED_TRUNCATE || 1873 end_sector > bs->total_sectors) && 1874 req->type != BDRV_TRACKED_DISCARD) { 1875 bs->total_sectors = end_sector; 1876 bdrv_parent_cb_resize(bs); 1877 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); 1878 } 1879 if (req->bytes) { 1880 switch (req->type) { 1881 case BDRV_TRACKED_WRITE: 1882 stat64_max(&bs->wr_highest_offset, offset + bytes); 1883 /* fall through, to set dirty bits */ 1884 case BDRV_TRACKED_DISCARD: 1885 bdrv_set_dirty(bs, offset, bytes); 1886 break; 1887 default: 1888 break; 1889 } 1890 } 1891 } 1892 1893 /* 1894 * Forwards an already correctly aligned write request to the BlockDriver, 1895 * after possibly fragmenting it. 1896 */ 1897 static int coroutine_fn GRAPH_RDLOCK 1898 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req, 1899 int64_t offset, int64_t bytes, int64_t align, 1900 QEMUIOVector *qiov, size_t qiov_offset, 1901 BdrvRequestFlags flags) 1902 { 1903 BlockDriverState *bs = child->bs; 1904 BlockDriver *drv = bs->drv; 1905 int ret; 1906 1907 int64_t bytes_remaining = bytes; 1908 int max_transfer; 1909 1910 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 1911 1912 if (!drv) { 1913 return -ENOMEDIUM; 1914 } 1915 1916 if (bdrv_has_readonly_bitmaps(bs)) { 1917 return -EPERM; 1918 } 1919 1920 assert(is_power_of_2(align)); 1921 assert((offset & (align - 1)) == 0); 1922 assert((bytes & (align - 1)) == 0); 1923 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1924 align); 1925 1926 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 1927 1928 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1929 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 1930 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { 1931 flags |= BDRV_REQ_ZERO_WRITE; 1932 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 1933 flags |= BDRV_REQ_MAY_UNMAP; 1934 } 1935 1936 /* Can't use optimization hint with bufferless zero write */ 1937 flags &= ~BDRV_REQ_REGISTERED_BUF; 1938 } 1939 1940 if (ret < 0) { 1941 /* Do nothing, write notifier decided to fail this request */ 1942 } else if (flags & BDRV_REQ_ZERO_WRITE) { 1943 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO); 1944 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 1945 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 1946 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, 1947 qiov, qiov_offset); 1948 } else if (bytes <= max_transfer) { 1949 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 1950 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); 1951 } else { 1952 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 1953 while (bytes_remaining) { 1954 int num = MIN(bytes_remaining, max_transfer); 1955 int local_flags = flags; 1956 1957 assert(num); 1958 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 1959 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1960 /* If FUA is going to be emulated by flush, we only 1961 * need to flush on the last iteration */ 1962 local_flags &= ~BDRV_REQ_FUA; 1963 } 1964 1965 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 1966 num, qiov, 1967 qiov_offset + bytes - bytes_remaining, 1968 local_flags); 1969 if (ret < 0) { 1970 break; 1971 } 1972 bytes_remaining -= num; 1973 } 1974 } 1975 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE); 1976 1977 if (ret >= 0) { 1978 ret = 0; 1979 } 1980 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 1981 1982 return ret; 1983 } 1984 1985 static int coroutine_fn GRAPH_RDLOCK 1986 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes, 1987 BdrvRequestFlags flags, BdrvTrackedRequest *req) 1988 { 1989 BlockDriverState *bs = child->bs; 1990 QEMUIOVector local_qiov; 1991 uint64_t align = bs->bl.request_alignment; 1992 int ret = 0; 1993 bool padding; 1994 BdrvRequestPadding pad; 1995 1996 /* This flag doesn't make sense for padding or zero writes */ 1997 flags &= ~BDRV_REQ_REGISTERED_BUF; 1998 1999 padding = bdrv_init_padding(bs, offset, bytes, &pad); 2000 if (padding) { 2001 assert(!(flags & BDRV_REQ_NO_WAIT)); 2002 bdrv_make_request_serialising(req, align); 2003 2004 bdrv_padding_rmw_read(child, req, &pad, true); 2005 2006 if (pad.head || pad.merge_reads) { 2007 int64_t aligned_offset = offset & ~(align - 1); 2008 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; 2009 2010 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 2011 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 2012 align, &local_qiov, 0, 2013 flags & ~BDRV_REQ_ZERO_WRITE); 2014 if (ret < 0 || pad.merge_reads) { 2015 /* Error or all work is done */ 2016 goto out; 2017 } 2018 offset += write_bytes - pad.head; 2019 bytes -= write_bytes - pad.head; 2020 } 2021 } 2022 2023 assert(!bytes || (offset & (align - 1)) == 0); 2024 if (bytes >= align) { 2025 /* Write the aligned part in the middle. */ 2026 int64_t aligned_bytes = bytes & ~(align - 1); 2027 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 2028 NULL, 0, flags); 2029 if (ret < 0) { 2030 goto out; 2031 } 2032 bytes -= aligned_bytes; 2033 offset += aligned_bytes; 2034 } 2035 2036 assert(!bytes || (offset & (align - 1)) == 0); 2037 if (bytes) { 2038 assert(align == pad.tail + bytes); 2039 2040 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); 2041 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 2042 &local_qiov, 0, 2043 flags & ~BDRV_REQ_ZERO_WRITE); 2044 } 2045 2046 out: 2047 bdrv_padding_destroy(&pad); 2048 2049 return ret; 2050 } 2051 2052 /* 2053 * Handle a write request in coroutine context 2054 */ 2055 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 2056 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 2057 BdrvRequestFlags flags) 2058 { 2059 IO_CODE(); 2060 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); 2061 } 2062 2063 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, 2064 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, 2065 BdrvRequestFlags flags) 2066 { 2067 BlockDriverState *bs = child->bs; 2068 BdrvTrackedRequest req; 2069 uint64_t align = bs->bl.request_alignment; 2070 BdrvRequestPadding pad; 2071 int ret; 2072 bool padded = false; 2073 IO_CODE(); 2074 2075 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); 2076 2077 if (!bdrv_co_is_inserted(bs)) { 2078 return -ENOMEDIUM; 2079 } 2080 2081 if (flags & BDRV_REQ_ZERO_WRITE) { 2082 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); 2083 } else { 2084 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 2085 } 2086 if (ret < 0) { 2087 return ret; 2088 } 2089 2090 /* If the request is misaligned then we can't make it efficient */ 2091 if ((flags & BDRV_REQ_NO_FALLBACK) && 2092 !QEMU_IS_ALIGNED(offset | bytes, align)) 2093 { 2094 return -ENOTSUP; 2095 } 2096 2097 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 2098 /* 2099 * Aligning zero request is nonsense. Even if driver has special meaning 2100 * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass 2101 * it to driver due to request_alignment. 2102 * 2103 * Still, no reason to return an error if someone do unaligned 2104 * zero-length write occasionally. 2105 */ 2106 return 0; 2107 } 2108 2109 if (!(flags & BDRV_REQ_ZERO_WRITE)) { 2110 /* 2111 * Pad request for following read-modify-write cycle. 2112 * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do 2113 * alignment only if there is no ZERO flag. 2114 */ 2115 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, 2116 &padded, &flags); 2117 if (ret < 0) { 2118 return ret; 2119 } 2120 } 2121 2122 bdrv_inc_in_flight(bs); 2123 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 2124 2125 if (flags & BDRV_REQ_ZERO_WRITE) { 2126 assert(!padded); 2127 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); 2128 goto out; 2129 } 2130 2131 if (padded) { 2132 /* 2133 * Request was unaligned to request_alignment and therefore 2134 * padded. We are going to do read-modify-write, and must 2135 * serialize the request to prevent interactions of the 2136 * widened region with other transactions. 2137 */ 2138 assert(!(flags & BDRV_REQ_NO_WAIT)); 2139 bdrv_make_request_serialising(&req, align); 2140 bdrv_padding_rmw_read(child, &req, &pad, false); 2141 } 2142 2143 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, 2144 qiov, qiov_offset, flags); 2145 2146 bdrv_padding_destroy(&pad); 2147 2148 out: 2149 tracked_request_end(&req); 2150 bdrv_dec_in_flight(bs); 2151 2152 return ret; 2153 } 2154 2155 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, 2156 int64_t bytes, BdrvRequestFlags flags) 2157 { 2158 IO_CODE(); 2159 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); 2160 assert_bdrv_graph_readable(); 2161 2162 if (!(child->bs->open_flags & BDRV_O_UNMAP)) { 2163 flags &= ~BDRV_REQ_MAY_UNMAP; 2164 } 2165 2166 return bdrv_co_pwritev(child, offset, bytes, NULL, 2167 BDRV_REQ_ZERO_WRITE | flags); 2168 } 2169 2170 /* 2171 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not. 2172 */ 2173 int bdrv_flush_all(void) 2174 { 2175 BdrvNextIterator it; 2176 BlockDriverState *bs = NULL; 2177 int result = 0; 2178 2179 GLOBAL_STATE_CODE(); 2180 2181 /* 2182 * bdrv queue is managed by record/replay, 2183 * creating new flush request for stopping 2184 * the VM may break the determinism 2185 */ 2186 if (replay_events_enabled()) { 2187 return result; 2188 } 2189 2190 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 2191 AioContext *aio_context = bdrv_get_aio_context(bs); 2192 int ret; 2193 2194 aio_context_acquire(aio_context); 2195 ret = bdrv_flush(bs); 2196 if (ret < 0 && !result) { 2197 result = ret; 2198 } 2199 aio_context_release(aio_context); 2200 } 2201 2202 return result; 2203 } 2204 2205 /* 2206 * Returns the allocation status of the specified sectors. 2207 * Drivers not implementing the functionality are assumed to not support 2208 * backing files, hence all their sectors are reported as allocated. 2209 * 2210 * If 'want_zero' is true, the caller is querying for mapping 2211 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and 2212 * _ZERO where possible; otherwise, the result favors larger 'pnum', 2213 * with a focus on accurate BDRV_BLOCK_ALLOCATED. 2214 * 2215 * If 'offset' is beyond the end of the disk image the return value is 2216 * BDRV_BLOCK_EOF and 'pnum' is set to 0. 2217 * 2218 * 'bytes' is the max value 'pnum' should be set to. If bytes goes 2219 * beyond the end of the disk image it will be clamped; if 'pnum' is set to 2220 * the end of the image, then the returned value will include BDRV_BLOCK_EOF. 2221 * 2222 * 'pnum' is set to the number of bytes (including and immediately 2223 * following the specified offset) that are easily known to be in the 2224 * same allocated/unallocated state. Note that a second call starting 2225 * at the original offset plus returned pnum may have the same status. 2226 * The returned value is non-zero on success except at end-of-file. 2227 * 2228 * Returns negative errno on failure. Otherwise, if the 2229 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are 2230 * set to the host mapping and BDS corresponding to the guest offset. 2231 */ 2232 static int coroutine_fn GRAPH_RDLOCK 2233 bdrv_co_block_status(BlockDriverState *bs, bool want_zero, 2234 int64_t offset, int64_t bytes, 2235 int64_t *pnum, int64_t *map, BlockDriverState **file) 2236 { 2237 int64_t total_size; 2238 int64_t n; /* bytes */ 2239 int ret; 2240 int64_t local_map = 0; 2241 BlockDriverState *local_file = NULL; 2242 int64_t aligned_offset, aligned_bytes; 2243 uint32_t align; 2244 bool has_filtered_child; 2245 2246 assert(pnum); 2247 assert_bdrv_graph_readable(); 2248 *pnum = 0; 2249 total_size = bdrv_getlength(bs); 2250 if (total_size < 0) { 2251 ret = total_size; 2252 goto early_out; 2253 } 2254 2255 if (offset >= total_size) { 2256 ret = BDRV_BLOCK_EOF; 2257 goto early_out; 2258 } 2259 if (!bytes) { 2260 ret = 0; 2261 goto early_out; 2262 } 2263 2264 n = total_size - offset; 2265 if (n < bytes) { 2266 bytes = n; 2267 } 2268 2269 /* Must be non-NULL or bdrv_getlength() would have failed */ 2270 assert(bs->drv); 2271 has_filtered_child = bdrv_filter_child(bs); 2272 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2273 *pnum = bytes; 2274 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2275 if (offset + bytes == total_size) { 2276 ret |= BDRV_BLOCK_EOF; 2277 } 2278 if (bs->drv->protocol_name) { 2279 ret |= BDRV_BLOCK_OFFSET_VALID; 2280 local_map = offset; 2281 local_file = bs; 2282 } 2283 goto early_out; 2284 } 2285 2286 bdrv_inc_in_flight(bs); 2287 2288 /* Round out to request_alignment boundaries */ 2289 align = bs->bl.request_alignment; 2290 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2291 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2292 2293 if (bs->drv->bdrv_co_block_status) { 2294 /* 2295 * Use the block-status cache only for protocol nodes: Format 2296 * drivers are generally quick to inquire the status, but protocol 2297 * drivers often need to get information from outside of qemu, so 2298 * we do not have control over the actual implementation. There 2299 * have been cases where inquiring the status took an unreasonably 2300 * long time, and we can do nothing in qemu to fix it. 2301 * This is especially problematic for images with large data areas, 2302 * because finding the few holes in them and giving them special 2303 * treatment does not gain much performance. Therefore, we try to 2304 * cache the last-identified data region. 2305 * 2306 * Second, limiting ourselves to protocol nodes allows us to assume 2307 * the block status for data regions to be DATA | OFFSET_VALID, and 2308 * that the host offset is the same as the guest offset. 2309 * 2310 * Note that it is possible that external writers zero parts of 2311 * the cached regions without the cache being invalidated, and so 2312 * we may report zeroes as data. This is not catastrophic, 2313 * however, because reporting zeroes as data is fine. 2314 */ 2315 if (QLIST_EMPTY(&bs->children) && 2316 bdrv_bsc_is_data(bs, aligned_offset, pnum)) 2317 { 2318 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 2319 local_file = bs; 2320 local_map = aligned_offset; 2321 } else { 2322 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2323 aligned_bytes, pnum, &local_map, 2324 &local_file); 2325 2326 /* 2327 * Note that checking QLIST_EMPTY(&bs->children) is also done when 2328 * the cache is queried above. Technically, we do not need to check 2329 * it here; the worst that can happen is that we fill the cache for 2330 * non-protocol nodes, and then it is never used. However, filling 2331 * the cache requires an RCU update, so double check here to avoid 2332 * such an update if possible. 2333 * 2334 * Check want_zero, because we only want to update the cache when we 2335 * have accurate information about what is zero and what is data. 2336 */ 2337 if (want_zero && 2338 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) && 2339 QLIST_EMPTY(&bs->children)) 2340 { 2341 /* 2342 * When a protocol driver reports BLOCK_OFFSET_VALID, the 2343 * returned local_map value must be the same as the offset we 2344 * have passed (aligned_offset), and local_bs must be the node 2345 * itself. 2346 * Assert this, because we follow this rule when reading from 2347 * the cache (see the `local_file = bs` and 2348 * `local_map = aligned_offset` assignments above), and the 2349 * result the cache delivers must be the same as the driver 2350 * would deliver. 2351 */ 2352 assert(local_file == bs); 2353 assert(local_map == aligned_offset); 2354 bdrv_bsc_fill(bs, aligned_offset, *pnum); 2355 } 2356 } 2357 } else { 2358 /* Default code for filters */ 2359 2360 local_file = bdrv_filter_bs(bs); 2361 assert(local_file); 2362 2363 *pnum = aligned_bytes; 2364 local_map = aligned_offset; 2365 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2366 } 2367 if (ret < 0) { 2368 *pnum = 0; 2369 goto out; 2370 } 2371 2372 /* 2373 * The driver's result must be a non-zero multiple of request_alignment. 2374 * Clamp pnum and adjust map to original request. 2375 */ 2376 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2377 align > offset - aligned_offset); 2378 if (ret & BDRV_BLOCK_RECURSE) { 2379 assert(ret & BDRV_BLOCK_DATA); 2380 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2381 assert(!(ret & BDRV_BLOCK_ZERO)); 2382 } 2383 2384 *pnum -= offset - aligned_offset; 2385 if (*pnum > bytes) { 2386 *pnum = bytes; 2387 } 2388 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2389 local_map += offset - aligned_offset; 2390 } 2391 2392 if (ret & BDRV_BLOCK_RAW) { 2393 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2394 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2395 *pnum, pnum, &local_map, &local_file); 2396 goto out; 2397 } 2398 2399 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2400 ret |= BDRV_BLOCK_ALLOCATED; 2401 } else if (bs->drv->supports_backing) { 2402 BlockDriverState *cow_bs = bdrv_cow_bs(bs); 2403 2404 if (!cow_bs) { 2405 ret |= BDRV_BLOCK_ZERO; 2406 } else if (want_zero) { 2407 int64_t size2 = bdrv_getlength(cow_bs); 2408 2409 if (size2 >= 0 && offset >= size2) { 2410 ret |= BDRV_BLOCK_ZERO; 2411 } 2412 } 2413 } 2414 2415 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2416 local_file && local_file != bs && 2417 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2418 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2419 int64_t file_pnum; 2420 int ret2; 2421 2422 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2423 *pnum, &file_pnum, NULL, NULL); 2424 if (ret2 >= 0) { 2425 /* Ignore errors. This is just providing extra information, it 2426 * is useful but not necessary. 2427 */ 2428 if (ret2 & BDRV_BLOCK_EOF && 2429 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2430 /* 2431 * It is valid for the format block driver to read 2432 * beyond the end of the underlying file's current 2433 * size; such areas read as zero. 2434 */ 2435 ret |= BDRV_BLOCK_ZERO; 2436 } else { 2437 /* Limit request to the range reported by the protocol driver */ 2438 *pnum = file_pnum; 2439 ret |= (ret2 & BDRV_BLOCK_ZERO); 2440 } 2441 } 2442 } 2443 2444 out: 2445 bdrv_dec_in_flight(bs); 2446 if (ret >= 0 && offset + *pnum == total_size) { 2447 ret |= BDRV_BLOCK_EOF; 2448 } 2449 early_out: 2450 if (file) { 2451 *file = local_file; 2452 } 2453 if (map) { 2454 *map = local_map; 2455 } 2456 return ret; 2457 } 2458 2459 int coroutine_fn 2460 bdrv_co_common_block_status_above(BlockDriverState *bs, 2461 BlockDriverState *base, 2462 bool include_base, 2463 bool want_zero, 2464 int64_t offset, 2465 int64_t bytes, 2466 int64_t *pnum, 2467 int64_t *map, 2468 BlockDriverState **file, 2469 int *depth) 2470 { 2471 int ret; 2472 BlockDriverState *p; 2473 int64_t eof = 0; 2474 int dummy; 2475 IO_CODE(); 2476 2477 assert(!include_base || base); /* Can't include NULL base */ 2478 assert_bdrv_graph_readable(); 2479 2480 if (!depth) { 2481 depth = &dummy; 2482 } 2483 *depth = 0; 2484 2485 if (!include_base && bs == base) { 2486 *pnum = bytes; 2487 return 0; 2488 } 2489 2490 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file); 2491 ++*depth; 2492 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { 2493 return ret; 2494 } 2495 2496 if (ret & BDRV_BLOCK_EOF) { 2497 eof = offset + *pnum; 2498 } 2499 2500 assert(*pnum <= bytes); 2501 bytes = *pnum; 2502 2503 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; 2504 p = bdrv_filter_or_cow_bs(p)) 2505 { 2506 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, 2507 file); 2508 ++*depth; 2509 if (ret < 0) { 2510 return ret; 2511 } 2512 if (*pnum == 0) { 2513 /* 2514 * The top layer deferred to this layer, and because this layer is 2515 * short, any zeroes that we synthesize beyond EOF behave as if they 2516 * were allocated at this layer. 2517 * 2518 * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be 2519 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2520 * below. 2521 */ 2522 assert(ret & BDRV_BLOCK_EOF); 2523 *pnum = bytes; 2524 if (file) { 2525 *file = p; 2526 } 2527 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED; 2528 break; 2529 } 2530 if (ret & BDRV_BLOCK_ALLOCATED) { 2531 /* 2532 * We've found the node and the status, we must break. 2533 * 2534 * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be 2535 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2536 * below. 2537 */ 2538 ret &= ~BDRV_BLOCK_EOF; 2539 break; 2540 } 2541 2542 if (p == base) { 2543 assert(include_base); 2544 break; 2545 } 2546 2547 /* 2548 * OK, [offset, offset + *pnum) region is unallocated on this layer, 2549 * let's continue the diving. 2550 */ 2551 assert(*pnum <= bytes); 2552 bytes = *pnum; 2553 } 2554 2555 if (offset + *pnum == eof) { 2556 ret |= BDRV_BLOCK_EOF; 2557 } 2558 2559 return ret; 2560 } 2561 2562 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2563 BlockDriverState *base, 2564 int64_t offset, int64_t bytes, 2565 int64_t *pnum, int64_t *map, 2566 BlockDriverState **file) 2567 { 2568 IO_CODE(); 2569 return bdrv_co_common_block_status_above(bs, base, false, true, offset, 2570 bytes, pnum, map, file, NULL); 2571 } 2572 2573 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2574 int64_t offset, int64_t bytes, int64_t *pnum, 2575 int64_t *map, BlockDriverState **file) 2576 { 2577 IO_CODE(); 2578 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, 2579 pnum, map, file, NULL); 2580 } 2581 2582 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2583 int64_t *pnum, int64_t *map, BlockDriverState **file) 2584 { 2585 IO_CODE(); 2586 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2587 offset, bytes, pnum, map, file); 2588 } 2589 2590 /* 2591 * Check @bs (and its backing chain) to see if the range defined 2592 * by @offset and @bytes is known to read as zeroes. 2593 * Return 1 if that is the case, 0 otherwise and -errno on error. 2594 * This test is meant to be fast rather than accurate so returning 0 2595 * does not guarantee non-zero data. 2596 */ 2597 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2598 int64_t bytes) 2599 { 2600 int ret; 2601 int64_t pnum = bytes; 2602 IO_CODE(); 2603 2604 if (!bytes) { 2605 return 1; 2606 } 2607 2608 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, 2609 bytes, &pnum, NULL, NULL, NULL); 2610 2611 if (ret < 0) { 2612 return ret; 2613 } 2614 2615 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2616 } 2617 2618 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, 2619 int64_t bytes, int64_t *pnum) 2620 { 2621 int ret; 2622 int64_t dummy; 2623 IO_CODE(); 2624 2625 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, 2626 bytes, pnum ? pnum : &dummy, NULL, 2627 NULL, NULL); 2628 if (ret < 0) { 2629 return ret; 2630 } 2631 return !!(ret & BDRV_BLOCK_ALLOCATED); 2632 } 2633 2634 int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, 2635 int64_t *pnum) 2636 { 2637 int ret; 2638 int64_t dummy; 2639 IO_CODE(); 2640 2641 ret = bdrv_common_block_status_above(bs, bs, true, false, offset, 2642 bytes, pnum ? pnum : &dummy, NULL, 2643 NULL, NULL); 2644 if (ret < 0) { 2645 return ret; 2646 } 2647 return !!(ret & BDRV_BLOCK_ALLOCATED); 2648 } 2649 2650 /* See bdrv_is_allocated_above for documentation */ 2651 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, 2652 BlockDriverState *base, 2653 bool include_base, int64_t offset, 2654 int64_t bytes, int64_t *pnum) 2655 { 2656 int depth; 2657 int ret; 2658 IO_CODE(); 2659 2660 ret = bdrv_co_common_block_status_above(top, base, include_base, false, 2661 offset, bytes, pnum, NULL, NULL, 2662 &depth); 2663 if (ret < 0) { 2664 return ret; 2665 } 2666 2667 if (ret & BDRV_BLOCK_ALLOCATED) { 2668 return depth; 2669 } 2670 return 0; 2671 } 2672 2673 /* 2674 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2675 * 2676 * Return a positive depth if (a prefix of) the given range is allocated 2677 * in any image between BASE and TOP (BASE is only included if include_base 2678 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. 2679 * BASE can be NULL to check if the given offset is allocated in any 2680 * image of the chain. Return 0 otherwise, or negative errno on 2681 * failure. 2682 * 2683 * 'pnum' is set to the number of bytes (including and immediately 2684 * following the specified offset) that are known to be in the same 2685 * allocated/unallocated state. Note that a subsequent call starting 2686 * at 'offset + *pnum' may return the same allocation status (in other 2687 * words, the result is not necessarily the maximum possible range); 2688 * but 'pnum' will only be 0 when end of file is reached. 2689 */ 2690 int bdrv_is_allocated_above(BlockDriverState *top, 2691 BlockDriverState *base, 2692 bool include_base, int64_t offset, 2693 int64_t bytes, int64_t *pnum) 2694 { 2695 int depth; 2696 int ret; 2697 IO_CODE(); 2698 2699 ret = bdrv_common_block_status_above(top, base, include_base, false, 2700 offset, bytes, pnum, NULL, NULL, 2701 &depth); 2702 if (ret < 0) { 2703 return ret; 2704 } 2705 2706 if (ret & BDRV_BLOCK_ALLOCATED) { 2707 return depth; 2708 } 2709 return 0; 2710 } 2711 2712 int coroutine_fn 2713 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2714 { 2715 BlockDriver *drv = bs->drv; 2716 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2717 int ret; 2718 IO_CODE(); 2719 assert_bdrv_graph_readable(); 2720 2721 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2722 if (ret < 0) { 2723 return ret; 2724 } 2725 2726 if (!drv) { 2727 return -ENOMEDIUM; 2728 } 2729 2730 bdrv_inc_in_flight(bs); 2731 2732 if (drv->bdrv_co_load_vmstate) { 2733 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos); 2734 } else if (child_bs) { 2735 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2736 } else { 2737 ret = -ENOTSUP; 2738 } 2739 2740 bdrv_dec_in_flight(bs); 2741 2742 return ret; 2743 } 2744 2745 int coroutine_fn 2746 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2747 { 2748 BlockDriver *drv = bs->drv; 2749 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2750 int ret; 2751 IO_CODE(); 2752 assert_bdrv_graph_readable(); 2753 2754 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2755 if (ret < 0) { 2756 return ret; 2757 } 2758 2759 if (!drv) { 2760 return -ENOMEDIUM; 2761 } 2762 2763 bdrv_inc_in_flight(bs); 2764 2765 if (drv->bdrv_co_save_vmstate) { 2766 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos); 2767 } else if (child_bs) { 2768 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2769 } else { 2770 ret = -ENOTSUP; 2771 } 2772 2773 bdrv_dec_in_flight(bs); 2774 2775 return ret; 2776 } 2777 2778 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2779 int64_t pos, int size) 2780 { 2781 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2782 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2783 IO_CODE(); 2784 2785 return ret < 0 ? ret : size; 2786 } 2787 2788 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2789 int64_t pos, int size) 2790 { 2791 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2792 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2793 IO_CODE(); 2794 2795 return ret < 0 ? ret : size; 2796 } 2797 2798 /**************************************************************/ 2799 /* async I/Os */ 2800 2801 void bdrv_aio_cancel(BlockAIOCB *acb) 2802 { 2803 IO_CODE(); 2804 qemu_aio_ref(acb); 2805 bdrv_aio_cancel_async(acb); 2806 while (acb->refcnt > 1) { 2807 if (acb->aiocb_info->get_aio_context) { 2808 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2809 } else if (acb->bs) { 2810 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2811 * assert that we're not using an I/O thread. Thread-safe 2812 * code should use bdrv_aio_cancel_async exclusively. 2813 */ 2814 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2815 aio_poll(bdrv_get_aio_context(acb->bs), true); 2816 } else { 2817 abort(); 2818 } 2819 } 2820 qemu_aio_unref(acb); 2821 } 2822 2823 /* Async version of aio cancel. The caller is not blocked if the acb implements 2824 * cancel_async, otherwise we do nothing and let the request normally complete. 2825 * In either case the completion callback must be called. */ 2826 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2827 { 2828 IO_CODE(); 2829 if (acb->aiocb_info->cancel_async) { 2830 acb->aiocb_info->cancel_async(acb); 2831 } 2832 } 2833 2834 /**************************************************************/ 2835 /* Coroutine block device emulation */ 2836 2837 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2838 { 2839 BdrvChild *primary_child = bdrv_primary_child(bs); 2840 BdrvChild *child; 2841 int current_gen; 2842 int ret = 0; 2843 IO_CODE(); 2844 2845 assert_bdrv_graph_readable(); 2846 bdrv_inc_in_flight(bs); 2847 2848 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) || 2849 bdrv_is_sg(bs)) { 2850 goto early_exit; 2851 } 2852 2853 qemu_co_mutex_lock(&bs->reqs_lock); 2854 current_gen = qatomic_read(&bs->write_gen); 2855 2856 /* Wait until any previous flushes are completed */ 2857 while (bs->active_flush_req) { 2858 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2859 } 2860 2861 /* Flushes reach this point in nondecreasing current_gen order. */ 2862 bs->active_flush_req = true; 2863 qemu_co_mutex_unlock(&bs->reqs_lock); 2864 2865 /* Write back all layers by calling one driver function */ 2866 if (bs->drv->bdrv_co_flush) { 2867 ret = bs->drv->bdrv_co_flush(bs); 2868 goto out; 2869 } 2870 2871 /* Write back cached data to the OS even with cache=unsafe */ 2872 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); 2873 if (bs->drv->bdrv_co_flush_to_os) { 2874 ret = bs->drv->bdrv_co_flush_to_os(bs); 2875 if (ret < 0) { 2876 goto out; 2877 } 2878 } 2879 2880 /* But don't actually force it to the disk with cache=unsafe */ 2881 if (bs->open_flags & BDRV_O_NO_FLUSH) { 2882 goto flush_children; 2883 } 2884 2885 /* Check if we really need to flush anything */ 2886 if (bs->flushed_gen == current_gen) { 2887 goto flush_children; 2888 } 2889 2890 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); 2891 if (!bs->drv) { 2892 /* bs->drv->bdrv_co_flush() might have ejected the BDS 2893 * (even in case of apparent success) */ 2894 ret = -ENOMEDIUM; 2895 goto out; 2896 } 2897 if (bs->drv->bdrv_co_flush_to_disk) { 2898 ret = bs->drv->bdrv_co_flush_to_disk(bs); 2899 } else if (bs->drv->bdrv_aio_flush) { 2900 BlockAIOCB *acb; 2901 CoroutineIOCompletion co = { 2902 .coroutine = qemu_coroutine_self(), 2903 }; 2904 2905 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 2906 if (acb == NULL) { 2907 ret = -EIO; 2908 } else { 2909 qemu_coroutine_yield(); 2910 ret = co.ret; 2911 } 2912 } else { 2913 /* 2914 * Some block drivers always operate in either writethrough or unsafe 2915 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 2916 * know how the server works (because the behaviour is hardcoded or 2917 * depends on server-side configuration), so we can't ensure that 2918 * everything is safe on disk. Returning an error doesn't work because 2919 * that would break guests even if the server operates in writethrough 2920 * mode. 2921 * 2922 * Let's hope the user knows what he's doing. 2923 */ 2924 ret = 0; 2925 } 2926 2927 if (ret < 0) { 2928 goto out; 2929 } 2930 2931 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 2932 * in the case of cache=unsafe, so there are no useless flushes. 2933 */ 2934 flush_children: 2935 ret = 0; 2936 QLIST_FOREACH(child, &bs->children, next) { 2937 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 2938 int this_child_ret = bdrv_co_flush(child->bs); 2939 if (!ret) { 2940 ret = this_child_ret; 2941 } 2942 } 2943 } 2944 2945 out: 2946 /* Notify any pending flushes that we have completed */ 2947 if (ret == 0) { 2948 bs->flushed_gen = current_gen; 2949 } 2950 2951 qemu_co_mutex_lock(&bs->reqs_lock); 2952 bs->active_flush_req = false; 2953 /* Return value is ignored - it's ok if wait queue is empty */ 2954 qemu_co_queue_next(&bs->flush_queue); 2955 qemu_co_mutex_unlock(&bs->reqs_lock); 2956 2957 early_exit: 2958 bdrv_dec_in_flight(bs); 2959 return ret; 2960 } 2961 2962 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 2963 int64_t bytes) 2964 { 2965 BdrvTrackedRequest req; 2966 int ret; 2967 int64_t max_pdiscard; 2968 int head, tail, align; 2969 BlockDriverState *bs = child->bs; 2970 IO_CODE(); 2971 assert_bdrv_graph_readable(); 2972 2973 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { 2974 return -ENOMEDIUM; 2975 } 2976 2977 if (bdrv_has_readonly_bitmaps(bs)) { 2978 return -EPERM; 2979 } 2980 2981 ret = bdrv_check_request(offset, bytes, NULL); 2982 if (ret < 0) { 2983 return ret; 2984 } 2985 2986 /* Do nothing if disabled. */ 2987 if (!(bs->open_flags & BDRV_O_UNMAP)) { 2988 return 0; 2989 } 2990 2991 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 2992 return 0; 2993 } 2994 2995 /* Invalidate the cached block-status data range if this discard overlaps */ 2996 bdrv_bsc_invalidate_range(bs, offset, bytes); 2997 2998 /* Discard is advisory, but some devices track and coalesce 2999 * unaligned requests, so we must pass everything down rather than 3000 * round here. Still, most devices will just silently ignore 3001 * unaligned requests (by returning -ENOTSUP), so we must fragment 3002 * the request accordingly. */ 3003 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 3004 assert(align % bs->bl.request_alignment == 0); 3005 head = offset % align; 3006 tail = (offset + bytes) % align; 3007 3008 bdrv_inc_in_flight(bs); 3009 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 3010 3011 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 3012 if (ret < 0) { 3013 goto out; 3014 } 3015 3016 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), 3017 align); 3018 assert(max_pdiscard >= bs->bl.request_alignment); 3019 3020 while (bytes > 0) { 3021 int64_t num = bytes; 3022 3023 if (head) { 3024 /* Make small requests to get to alignment boundaries. */ 3025 num = MIN(bytes, align - head); 3026 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 3027 num %= bs->bl.request_alignment; 3028 } 3029 head = (head + num) % align; 3030 assert(num < max_pdiscard); 3031 } else if (tail) { 3032 if (num > align) { 3033 /* Shorten the request to the last aligned cluster. */ 3034 num -= tail; 3035 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 3036 tail > bs->bl.request_alignment) { 3037 tail %= bs->bl.request_alignment; 3038 num -= tail; 3039 } 3040 } 3041 /* limit request size */ 3042 if (num > max_pdiscard) { 3043 num = max_pdiscard; 3044 } 3045 3046 if (!bs->drv) { 3047 ret = -ENOMEDIUM; 3048 goto out; 3049 } 3050 if (bs->drv->bdrv_co_pdiscard) { 3051 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 3052 } else { 3053 BlockAIOCB *acb; 3054 CoroutineIOCompletion co = { 3055 .coroutine = qemu_coroutine_self(), 3056 }; 3057 3058 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 3059 bdrv_co_io_em_complete, &co); 3060 if (acb == NULL) { 3061 ret = -EIO; 3062 goto out; 3063 } else { 3064 qemu_coroutine_yield(); 3065 ret = co.ret; 3066 } 3067 } 3068 if (ret && ret != -ENOTSUP) { 3069 goto out; 3070 } 3071 3072 offset += num; 3073 bytes -= num; 3074 } 3075 ret = 0; 3076 out: 3077 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 3078 tracked_request_end(&req); 3079 bdrv_dec_in_flight(bs); 3080 return ret; 3081 } 3082 3083 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3084 { 3085 BlockDriver *drv = bs->drv; 3086 CoroutineIOCompletion co = { 3087 .coroutine = qemu_coroutine_self(), 3088 }; 3089 BlockAIOCB *acb; 3090 IO_CODE(); 3091 assert_bdrv_graph_readable(); 3092 3093 bdrv_inc_in_flight(bs); 3094 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3095 co.ret = -ENOTSUP; 3096 goto out; 3097 } 3098 3099 if (drv->bdrv_co_ioctl) { 3100 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3101 } else { 3102 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3103 if (!acb) { 3104 co.ret = -ENOTSUP; 3105 goto out; 3106 } 3107 qemu_coroutine_yield(); 3108 } 3109 out: 3110 bdrv_dec_in_flight(bs); 3111 return co.ret; 3112 } 3113 3114 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3115 { 3116 IO_CODE(); 3117 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3118 } 3119 3120 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3121 { 3122 IO_CODE(); 3123 return memset(qemu_blockalign(bs, size), 0, size); 3124 } 3125 3126 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3127 { 3128 size_t align = bdrv_opt_mem_align(bs); 3129 IO_CODE(); 3130 3131 /* Ensure that NULL is never returned on success */ 3132 assert(align > 0); 3133 if (size == 0) { 3134 size = align; 3135 } 3136 3137 return qemu_try_memalign(align, size); 3138 } 3139 3140 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3141 { 3142 void *mem = qemu_try_blockalign(bs, size); 3143 IO_CODE(); 3144 3145 if (mem) { 3146 memset(mem, 0, size); 3147 } 3148 3149 return mem; 3150 } 3151 3152 void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs) 3153 { 3154 BdrvChild *child; 3155 IO_CODE(); 3156 assert_bdrv_graph_readable(); 3157 3158 QLIST_FOREACH(child, &bs->children, next) { 3159 bdrv_co_io_plug(child->bs); 3160 } 3161 3162 if (qatomic_fetch_inc(&bs->io_plugged) == 0) { 3163 BlockDriver *drv = bs->drv; 3164 if (drv && drv->bdrv_co_io_plug) { 3165 drv->bdrv_co_io_plug(bs); 3166 } 3167 } 3168 } 3169 3170 void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs) 3171 { 3172 BdrvChild *child; 3173 IO_CODE(); 3174 assert_bdrv_graph_readable(); 3175 3176 assert(bs->io_plugged); 3177 if (qatomic_fetch_dec(&bs->io_plugged) == 1) { 3178 BlockDriver *drv = bs->drv; 3179 if (drv && drv->bdrv_co_io_unplug) { 3180 drv->bdrv_co_io_unplug(bs); 3181 } 3182 } 3183 3184 QLIST_FOREACH(child, &bs->children, next) { 3185 bdrv_co_io_unplug(child->bs); 3186 } 3187 } 3188 3189 /* Helper that undoes bdrv_register_buf() when it fails partway through */ 3190 static void GRAPH_RDLOCK 3191 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size, 3192 BdrvChild *final_child) 3193 { 3194 BdrvChild *child; 3195 3196 GLOBAL_STATE_CODE(); 3197 assert_bdrv_graph_readable(); 3198 3199 QLIST_FOREACH(child, &bs->children, next) { 3200 if (child == final_child) { 3201 break; 3202 } 3203 3204 bdrv_unregister_buf(child->bs, host, size); 3205 } 3206 3207 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3208 bs->drv->bdrv_unregister_buf(bs, host, size); 3209 } 3210 } 3211 3212 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, 3213 Error **errp) 3214 { 3215 BdrvChild *child; 3216 3217 GLOBAL_STATE_CODE(); 3218 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3219 3220 if (bs->drv && bs->drv->bdrv_register_buf) { 3221 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { 3222 return false; 3223 } 3224 } 3225 QLIST_FOREACH(child, &bs->children, next) { 3226 if (!bdrv_register_buf(child->bs, host, size, errp)) { 3227 bdrv_register_buf_rollback(bs, host, size, child); 3228 return false; 3229 } 3230 } 3231 return true; 3232 } 3233 3234 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size) 3235 { 3236 BdrvChild *child; 3237 3238 GLOBAL_STATE_CODE(); 3239 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3240 3241 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3242 bs->drv->bdrv_unregister_buf(bs, host, size); 3243 } 3244 QLIST_FOREACH(child, &bs->children, next) { 3245 bdrv_unregister_buf(child->bs, host, size); 3246 } 3247 } 3248 3249 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal( 3250 BdrvChild *src, int64_t src_offset, BdrvChild *dst, 3251 int64_t dst_offset, int64_t bytes, 3252 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3253 bool recurse_src) 3254 { 3255 BdrvTrackedRequest req; 3256 int ret; 3257 assert_bdrv_graph_readable(); 3258 3259 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3260 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3261 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3262 assert(!(read_flags & BDRV_REQ_NO_WAIT)); 3263 assert(!(write_flags & BDRV_REQ_NO_WAIT)); 3264 3265 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { 3266 return -ENOMEDIUM; 3267 } 3268 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0); 3269 if (ret) { 3270 return ret; 3271 } 3272 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3273 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3274 } 3275 3276 if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) { 3277 return -ENOMEDIUM; 3278 } 3279 ret = bdrv_check_request32(src_offset, bytes, NULL, 0); 3280 if (ret) { 3281 return ret; 3282 } 3283 3284 if (!src->bs->drv->bdrv_co_copy_range_from 3285 || !dst->bs->drv->bdrv_co_copy_range_to 3286 || src->bs->encrypted || dst->bs->encrypted) { 3287 return -ENOTSUP; 3288 } 3289 3290 if (recurse_src) { 3291 bdrv_inc_in_flight(src->bs); 3292 tracked_request_begin(&req, src->bs, src_offset, bytes, 3293 BDRV_TRACKED_READ); 3294 3295 /* BDRV_REQ_SERIALISING is only for write operation */ 3296 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3297 bdrv_wait_serialising_requests(&req); 3298 3299 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3300 src, src_offset, 3301 dst, dst_offset, 3302 bytes, 3303 read_flags, write_flags); 3304 3305 tracked_request_end(&req); 3306 bdrv_dec_in_flight(src->bs); 3307 } else { 3308 bdrv_inc_in_flight(dst->bs); 3309 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3310 BDRV_TRACKED_WRITE); 3311 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3312 write_flags); 3313 if (!ret) { 3314 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3315 src, src_offset, 3316 dst, dst_offset, 3317 bytes, 3318 read_flags, write_flags); 3319 } 3320 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3321 tracked_request_end(&req); 3322 bdrv_dec_in_flight(dst->bs); 3323 } 3324 3325 return ret; 3326 } 3327 3328 /* Copy range from @src to @dst. 3329 * 3330 * See the comment of bdrv_co_copy_range for the parameter and return value 3331 * semantics. */ 3332 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, 3333 BdrvChild *dst, int64_t dst_offset, 3334 int64_t bytes, 3335 BdrvRequestFlags read_flags, 3336 BdrvRequestFlags write_flags) 3337 { 3338 IO_CODE(); 3339 assert_bdrv_graph_readable(); 3340 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3341 read_flags, write_flags); 3342 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3343 bytes, read_flags, write_flags, true); 3344 } 3345 3346 /* Copy range from @src to @dst. 3347 * 3348 * See the comment of bdrv_co_copy_range for the parameter and return value 3349 * semantics. */ 3350 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, 3351 BdrvChild *dst, int64_t dst_offset, 3352 int64_t bytes, 3353 BdrvRequestFlags read_flags, 3354 BdrvRequestFlags write_flags) 3355 { 3356 IO_CODE(); 3357 assert_bdrv_graph_readable(); 3358 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3359 read_flags, write_flags); 3360 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3361 bytes, read_flags, write_flags, false); 3362 } 3363 3364 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, 3365 BdrvChild *dst, int64_t dst_offset, 3366 int64_t bytes, BdrvRequestFlags read_flags, 3367 BdrvRequestFlags write_flags) 3368 { 3369 IO_CODE(); 3370 assert_bdrv_graph_readable(); 3371 3372 return bdrv_co_copy_range_from(src, src_offset, 3373 dst, dst_offset, 3374 bytes, read_flags, write_flags); 3375 } 3376 3377 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3378 { 3379 BdrvChild *c; 3380 QLIST_FOREACH(c, &bs->parents, next_parent) { 3381 if (c->klass->resize) { 3382 c->klass->resize(c); 3383 } 3384 } 3385 } 3386 3387 /** 3388 * Truncate file to 'offset' bytes (needed only for file protocols) 3389 * 3390 * If 'exact' is true, the file must be resized to exactly the given 3391 * 'offset'. Otherwise, it is sufficient for the node to be at least 3392 * 'offset' bytes in length. 3393 */ 3394 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3395 PreallocMode prealloc, BdrvRequestFlags flags, 3396 Error **errp) 3397 { 3398 BlockDriverState *bs = child->bs; 3399 BdrvChild *filtered, *backing; 3400 BlockDriver *drv = bs->drv; 3401 BdrvTrackedRequest req; 3402 int64_t old_size, new_bytes; 3403 int ret; 3404 IO_CODE(); 3405 assert_bdrv_graph_readable(); 3406 3407 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3408 if (!drv) { 3409 error_setg(errp, "No medium inserted"); 3410 return -ENOMEDIUM; 3411 } 3412 if (offset < 0) { 3413 error_setg(errp, "Image size cannot be negative"); 3414 return -EINVAL; 3415 } 3416 3417 ret = bdrv_check_request(offset, 0, errp); 3418 if (ret < 0) { 3419 return ret; 3420 } 3421 3422 old_size = bdrv_getlength(bs); 3423 if (old_size < 0) { 3424 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3425 return old_size; 3426 } 3427 3428 if (bdrv_is_read_only(bs)) { 3429 error_setg(errp, "Image is read-only"); 3430 return -EACCES; 3431 } 3432 3433 if (offset > old_size) { 3434 new_bytes = offset - old_size; 3435 } else { 3436 new_bytes = 0; 3437 } 3438 3439 bdrv_inc_in_flight(bs); 3440 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3441 BDRV_TRACKED_TRUNCATE); 3442 3443 /* If we are growing the image and potentially using preallocation for the 3444 * new area, we need to make sure that no write requests are made to it 3445 * concurrently or they might be overwritten by preallocation. */ 3446 if (new_bytes) { 3447 bdrv_make_request_serialising(&req, 1); 3448 } 3449 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3450 0); 3451 if (ret < 0) { 3452 error_setg_errno(errp, -ret, 3453 "Failed to prepare request for truncation"); 3454 goto out; 3455 } 3456 3457 filtered = bdrv_filter_child(bs); 3458 backing = bdrv_cow_child(bs); 3459 3460 /* 3461 * If the image has a backing file that is large enough that it would 3462 * provide data for the new area, we cannot leave it unallocated because 3463 * then the backing file content would become visible. Instead, zero-fill 3464 * the new area. 3465 * 3466 * Note that if the image has a backing file, but was opened without the 3467 * backing file, taking care of keeping things consistent with that backing 3468 * file is the user's responsibility. 3469 */ 3470 if (new_bytes && backing) { 3471 int64_t backing_len; 3472 3473 backing_len = bdrv_co_getlength(backing->bs); 3474 if (backing_len < 0) { 3475 ret = backing_len; 3476 error_setg_errno(errp, -ret, "Could not get backing file size"); 3477 goto out; 3478 } 3479 3480 if (backing_len > old_size) { 3481 flags |= BDRV_REQ_ZERO_WRITE; 3482 } 3483 } 3484 3485 if (drv->bdrv_co_truncate) { 3486 if (flags & ~bs->supported_truncate_flags) { 3487 error_setg(errp, "Block driver does not support requested flags"); 3488 ret = -ENOTSUP; 3489 goto out; 3490 } 3491 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3492 } else if (filtered) { 3493 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3494 } else { 3495 error_setg(errp, "Image format driver does not support resize"); 3496 ret = -ENOTSUP; 3497 goto out; 3498 } 3499 if (ret < 0) { 3500 goto out; 3501 } 3502 3503 ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3504 if (ret < 0) { 3505 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3506 } else { 3507 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3508 } 3509 /* 3510 * It's possible that truncation succeeded but bdrv_refresh_total_sectors 3511 * failed, but the latter doesn't affect how we should finish the request. 3512 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. 3513 */ 3514 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3515 3516 out: 3517 tracked_request_end(&req); 3518 bdrv_dec_in_flight(bs); 3519 3520 return ret; 3521 } 3522 3523 void bdrv_cancel_in_flight(BlockDriverState *bs) 3524 { 3525 GLOBAL_STATE_CODE(); 3526 if (!bs || !bs->drv) { 3527 return; 3528 } 3529 3530 if (bs->drv->bdrv_cancel_in_flight) { 3531 bs->drv->bdrv_cancel_in_flight(bs); 3532 } 3533 } 3534 3535 int coroutine_fn 3536 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, 3537 QEMUIOVector *qiov, size_t qiov_offset) 3538 { 3539 BlockDriverState *bs = child->bs; 3540 BlockDriver *drv = bs->drv; 3541 int ret; 3542 IO_CODE(); 3543 assert_bdrv_graph_readable(); 3544 3545 if (!drv) { 3546 return -ENOMEDIUM; 3547 } 3548 3549 if (!drv->bdrv_co_preadv_snapshot) { 3550 return -ENOTSUP; 3551 } 3552 3553 bdrv_inc_in_flight(bs); 3554 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); 3555 bdrv_dec_in_flight(bs); 3556 3557 return ret; 3558 } 3559 3560 int coroutine_fn 3561 bdrv_co_snapshot_block_status(BlockDriverState *bs, 3562 bool want_zero, int64_t offset, int64_t bytes, 3563 int64_t *pnum, int64_t *map, 3564 BlockDriverState **file) 3565 { 3566 BlockDriver *drv = bs->drv; 3567 int ret; 3568 IO_CODE(); 3569 assert_bdrv_graph_readable(); 3570 3571 if (!drv) { 3572 return -ENOMEDIUM; 3573 } 3574 3575 if (!drv->bdrv_co_snapshot_block_status) { 3576 return -ENOTSUP; 3577 } 3578 3579 bdrv_inc_in_flight(bs); 3580 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, 3581 pnum, map, file); 3582 bdrv_dec_in_flight(bs); 3583 3584 return ret; 3585 } 3586 3587 int coroutine_fn 3588 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) 3589 { 3590 BlockDriver *drv = bs->drv; 3591 int ret; 3592 IO_CODE(); 3593 assert_bdrv_graph_readable(); 3594 3595 if (!drv) { 3596 return -ENOMEDIUM; 3597 } 3598 3599 if (!drv->bdrv_co_pdiscard_snapshot) { 3600 return -ENOTSUP; 3601 } 3602 3603 bdrv_inc_in_flight(bs); 3604 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); 3605 bdrv_dec_in_flight(bs); 3606 3607 return ret; 3608 } 3609