/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}
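
/*
 * Illustrative sketch: a parent hooks into draining through the
 * drained_begin/drained_poll/drained_end slots of its BdrvChildClass.
 * The my_parent_* names below are hypothetical; the contract is that
 * drained_poll keeps returning true until the parent has no request
 * in flight against the child:
 *
 *     static void my_parent_drained_begin(BdrvChild *c)
 *     {
 *         // stop submitting new requests to c->bs
 *     }
 *
 *     static bool my_parent_drained_poll(BdrvChild *c)
 *     {
 *         return my_parent_in_flight(c) > 0;  // hypothetical counter
 *     }
 *
 *     static void my_parent_drained_end(BdrvChild *c)
 *     {
 *         // resume submitting requests to c->bs
 *     }
 */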

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();
    assume_graph_lock(); /* FIXME */

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
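
/*
 * Illustrative example: merging a child that advertises max_transfer of
 * 1 MiB into a parent whose max_transfer is still 0 ("unlimited") yields
 * 1 MiB, because MIN_NON_ZERO() treats 0 as "no limit". Alignments go the
 * other way and take the maximum:
 *
 *     BlockLimits dst = { .max_transfer = 0,       .opt_mem_alignment = 512 };
 *     BlockLimits src = { .max_transfer = 1 << 20, .opt_mem_alignment = 4096 };
 *     bdrv_merge_limits(&dst, &src);
 *     // dst.max_transfer == 1 << 20, dst.opt_mem_alignment == 4096
 */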

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}
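
/*
 * Illustrative sketch: because the flag is a reference count, nested
 * users compose safely as long as each enable is paired with exactly
 * one disable:
 *
 *     bdrv_enable_copy_on_read(bs);   // user A
 *     bdrv_enable_copy_on_read(bs);   // user B
 *     bdrv_disable_copy_on_read(bs);  // user A done; COR still enabled
 *     bdrv_disable_copy_on_read(bs);  // user B done; COR now disabled
 */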

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    IO_OR_GS_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
        aio_enable_external(bdrv_get_aio_context(bs));
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
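
/*
 * Illustrative sketch: callers use a drained section to get exclusive
 * access to a node, e.g. before reconfiguring it. No new requests reach
 * bs between begin and end:
 *
 *     bdrv_drained_begin(bs);
 *     // bs is quiesced: bs->in_flight == 0 and all parents are paused
 *     ... inspect or reconfigure bs ...
 *     bdrv_drained_end(bs);
 */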

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;
    GLOBAL_STATE_CODE();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}

void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be endless
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
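
/*
 * Illustrative sketch: a global drained section quiesces every node at
 * once, e.g. around a state change that must not race with any I/O:
 *
 *     bdrv_drain_all_begin();
 *     // no request is in flight on any BlockDriverState here
 *     ... perform the global change ...
 *     bdrv_drain_all_end();
 */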

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
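
/*
 * Illustrative example: the overlap test treats [offset, offset + bytes)
 * as a half-open interval, so two adjacent requests do not conflict:
 *
 *     // req covers [0, 4096)
 *     tracked_request_overlaps(req, 4096, 4096);  // false: merely adjacent
 *     tracked_request_overlaps(req, 4095, 2);     // true: overlaps 1 byte
 */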
666 */ 667 if (!req->waiting_for) { 668 return req; 669 } 670 } 671 } 672 673 return NULL; 674 } 675 676 /* Called with self->bs->reqs_lock held */ 677 static void coroutine_fn 678 bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self) 679 { 680 BdrvTrackedRequest *req; 681 682 while ((req = bdrv_find_conflicting_request(self))) { 683 self->waiting_for = req; 684 qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); 685 self->waiting_for = NULL; 686 } 687 } 688 689 /* Called with req->bs->reqs_lock held */ 690 static void tracked_request_set_serialising(BdrvTrackedRequest *req, 691 uint64_t align) 692 { 693 int64_t overlap_offset = req->offset & ~(align - 1); 694 int64_t overlap_bytes = 695 ROUND_UP(req->offset + req->bytes, align) - overlap_offset; 696 697 bdrv_check_request(req->offset, req->bytes, &error_abort); 698 699 if (!req->serialising) { 700 qatomic_inc(&req->bs->serialising_in_flight); 701 req->serialising = true; 702 } 703 704 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 705 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 706 } 707 708 /** 709 * Return the tracked request on @bs for the current coroutine, or 710 * NULL if there is none. 711 */ 712 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) 713 { 714 BdrvTrackedRequest *req; 715 Coroutine *self = qemu_coroutine_self(); 716 IO_CODE(); 717 718 QLIST_FOREACH(req, &bs->tracked_requests, list) { 719 if (req->co == self) { 720 return req; 721 } 722 } 723 724 return NULL; 725 } 726 727 /** 728 * Round a region to cluster boundaries 729 */ 730 void coroutine_fn bdrv_round_to_clusters(BlockDriverState *bs, 731 int64_t offset, int64_t bytes, 732 int64_t *cluster_offset, 733 int64_t *cluster_bytes) 734 { 735 BlockDriverInfo bdi; 736 IO_CODE(); 737 if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 738 *cluster_offset = offset; 739 *cluster_bytes = bytes; 740 } else { 741 int64_t c = bdi.cluster_size; 742 *cluster_offset = QEMU_ALIGN_DOWN(offset, c); 743 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); 744 } 745 } 746 747 static coroutine_fn int bdrv_get_cluster_size(BlockDriverState *bs) 748 { 749 BlockDriverInfo bdi; 750 int ret; 751 752 ret = bdrv_co_get_info(bs, &bdi); 753 if (ret < 0 || bdi.cluster_size == 0) { 754 return bs->bl.request_alignment; 755 } else { 756 return bdi.cluster_size; 757 } 758 } 759 760 void bdrv_inc_in_flight(BlockDriverState *bs) 761 { 762 IO_CODE(); 763 qatomic_inc(&bs->in_flight); 764 } 765 766 void bdrv_wakeup(BlockDriverState *bs) 767 { 768 IO_CODE(); 769 aio_wait_kick(); 770 } 771 772 void bdrv_dec_in_flight(BlockDriverState *bs) 773 { 774 IO_CODE(); 775 qatomic_dec(&bs->in_flight); 776 bdrv_wakeup(bs); 777 } 778 779 static void coroutine_fn 780 bdrv_wait_serialising_requests(BdrvTrackedRequest *self) 781 { 782 BlockDriverState *bs = self->bs; 783 784 if (!qatomic_read(&bs->serialising_in_flight)) { 785 return; 786 } 787 788 qemu_co_mutex_lock(&bs->reqs_lock); 789 bdrv_wait_serialising_requests_locked(self); 790 qemu_co_mutex_unlock(&bs->reqs_lock); 791 } 792 793 void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, 794 uint64_t align) 795 { 796 IO_CODE(); 797 798 qemu_co_mutex_lock(&req->bs->reqs_lock); 799 800 tracked_request_set_serialising(req, align); 801 bdrv_wait_serialising_requests_locked(req); 802 803 qemu_co_mutex_unlock(&req->bs->reqs_lock); 804 } 805 806 int bdrv_check_qiov_request(int64_t offset, int64_t bytes, 807 QEMUIOVector *qiov, size_t qiov_offset, 808 

int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
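
/*
 * Illustrative sketch: zeroing a whole device while letting the driver
 * unmap ranges instead of writing them:
 *
 *     int ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("failed to zero device: %s", strerror(-ret));
 *     }
 */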

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
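
/*
 * Illustrative sketch: the CoroutineIOCompletion pattern above adapts any
 * callback-based AIO interface to coroutine context. start_async_io() is
 * hypothetical; the shape is always the same:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *
 *     start_async_io(..., bdrv_co_io_em_complete, &co);
 *     qemu_coroutine_yield();   // resumed by bdrv_co_io_em_complete()
 *     return co.ret;            // completion status filled in for us
 */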

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
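
/*
 * Illustrative example: the FUA handling in bdrv_driver_pwritev() turns
 * one flagged write into a write plus flush when the driver lacks native
 * support:
 *
 *     // without BDRV_REQ_FUA in bs->supported_write_flags:
 *     //   write(offset, bytes); bdrv_co_flush(bs);
 *     // with BDRV_REQ_FUA in bs->supported_write_flags:
 *     //   write(offset, bytes, FUA);  // no extra flush needed
 */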

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them.
         */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |                                   |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |                                   |            |
 *  |        offset    |                     |    end           |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)    ALIGN_UP(end)
 *  [buf ... )                               [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to the sub-buffer corresponding to an align-sized
 * chunk around the tail, if the tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
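
/*
 * Illustrative example: with align = 4096, a 100-byte request at offset
 * 4097 needs both paddings:
 *
 *     head = 4097 & 4095 = 1
 *     tail = 4096 - ((4097 + 100) & 4095) = 3995
 *     sum  = 1 + 100 + 3995 = 4096
 *
 * Since sum == align, one aligned buffer covers head, data and tail, so
 * buf_len == align and merge_reads is true: a single RMW read suffices.
 */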

static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
                      BdrvRequestPadding *pad, bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}

/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
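
/*
 * Illustrative sketch: a typical caller wraps a plain buffer in a
 * QEMUIOVector and issues the read from coroutine context:
 *
 *     uint8_t buf[4096];
 *     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
 *
 *     int ret = bdrv_co_preadv(child, 0, sizeof(buf), &qiov, 0);
 *     // ret == 0 on success; offset/bytes need not be aligned, the
 *     // block layer pads the request to bs->bl.request_alignment
 */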

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero request is nonsense. Even if a driver assigns
         * special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we cannot pass such a request to
         * the driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_destroy(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                         BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
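
/*
 * Illustrative example: with alignment = 4096 and a zero request of
 * 10000 bytes at offset 4097, the loop above produces three chunks, so
 * only the first and last are unaligned:
 *
 *     [4097, 8192)    head fragment, 4095 bytes
 *     [8192, 12288)   aligned bulk, 4096 bytes
 *     [12288, 14097)  tail fragment, 1809 bytes
 */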

static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of the image file, so we cannot assert about
     * BDRV_TRACKED_DISCARD here. Instead, just skip it, since semantically a
     * discard request beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
                     int64_t offset, int64_t bytes, int64_t align,
                     QEMUIOVector *qiov, size_t qiov_offset,
                     BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }

        /* Can't use optimization hint with bufferless zero write */
        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
*/ 2030 int64_t aligned_bytes = bytes & ~(align - 1); 2031 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 2032 NULL, 0, flags); 2033 if (ret < 0) { 2034 goto out; 2035 } 2036 bytes -= aligned_bytes; 2037 offset += aligned_bytes; 2038 } 2039 2040 assert(!bytes || (offset & (align - 1)) == 0); 2041 if (bytes) { 2042 assert(align == pad.tail + bytes); 2043 2044 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); 2045 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 2046 &local_qiov, 0, 2047 flags & ~BDRV_REQ_ZERO_WRITE); 2048 } 2049 2050 out: 2051 bdrv_padding_destroy(&pad); 2052 2053 return ret; 2054 } 2055 2056 /* 2057 * Handle a write request in coroutine context 2058 */ 2059 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 2060 int64_t offset, int64_t bytes, QEMUIOVector *qiov, 2061 BdrvRequestFlags flags) 2062 { 2063 IO_CODE(); 2064 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); 2065 } 2066 2067 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, 2068 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, 2069 BdrvRequestFlags flags) 2070 { 2071 BlockDriverState *bs = child->bs; 2072 BdrvTrackedRequest req; 2073 uint64_t align = bs->bl.request_alignment; 2074 BdrvRequestPadding pad; 2075 int ret; 2076 bool padded = false; 2077 IO_CODE(); 2078 2079 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); 2080 2081 if (!bdrv_co_is_inserted(bs)) { 2082 return -ENOMEDIUM; 2083 } 2084 2085 if (flags & BDRV_REQ_ZERO_WRITE) { 2086 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); 2087 } else { 2088 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); 2089 } 2090 if (ret < 0) { 2091 return ret; 2092 } 2093 2094 /* If the request is misaligned then we can't make it efficient */ 2095 if ((flags & BDRV_REQ_NO_FALLBACK) && 2096 !QEMU_IS_ALIGNED(offset | bytes, align)) 2097 { 2098 return -ENOTSUP; 2099 } 2100 2101 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 2102 /* 2103 * Aligning a zero-length request is nonsense. Even if a driver assigns 2104 * special meaning to zero-length requests (like 2105 * qcow2_co_pwritev_compressed_part), we can't pass such a request down to the driver due to request_alignment. 2106 * 2107 * Still, there is no reason to return an error if someone does an 2108 * unaligned zero-length write occasionally. 2109 */ 2110 return 0; 2111 } 2112 2113 if (!(flags & BDRV_REQ_ZERO_WRITE)) { 2114 /* 2115 * Pad the request for the following read-modify-write cycle. 2116 * bdrv_co_do_zero_pwritev() does the aligning by itself, so we 2117 * handle alignment here only if there is no ZERO flag. 2118 */ 2119 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, 2120 &padded, &flags); 2121 if (ret < 0) { 2122 return ret; 2123 } 2124 } 2125 2126 bdrv_inc_in_flight(bs); 2127 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 2128 2129 if (flags & BDRV_REQ_ZERO_WRITE) { 2130 assert(!padded); 2131 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); 2132 goto out; 2133 } 2134 2135 if (padded) { 2136 /* 2137 * Request was unaligned to request_alignment and therefore 2138 * padded. We are going to do read-modify-write, and must 2139 * serialize the request to prevent interactions of the 2140 * widened region with other transactions.
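* For instance (hypothetical offsets): with request_alignment = 512, a write to [700, 1000) is widened to [512, 1024); without serialization, a concurrent write to [512, 700) could land between our padding read and our write-back and be undone by the stale head data.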
2141 */ 2142 assert(!(flags & BDRV_REQ_NO_WAIT)); 2143 bdrv_make_request_serialising(&req, align); 2144 bdrv_padding_rmw_read(child, &req, &pad, false); 2145 } 2146 2147 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, 2148 qiov, qiov_offset, flags); 2149 2150 bdrv_padding_destroy(&pad); 2151 2152 out: 2153 tracked_request_end(&req); 2154 bdrv_dec_in_flight(bs); 2155 2156 return ret; 2157 } 2158 2159 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, 2160 int64_t bytes, BdrvRequestFlags flags) 2161 { 2162 IO_CODE(); 2163 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); 2164 assert_bdrv_graph_readable(); 2165 2166 if (!(child->bs->open_flags & BDRV_O_UNMAP)) { 2167 flags &= ~BDRV_REQ_MAY_UNMAP; 2168 } 2169 2170 return bdrv_co_pwritev(child, offset, bytes, NULL, 2171 BDRV_REQ_ZERO_WRITE | flags); 2172 } 2173 2174 /* 2175 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend or not. 2176 */ 2177 int bdrv_flush_all(void) 2178 { 2179 BdrvNextIterator it; 2180 BlockDriverState *bs = NULL; 2181 int result = 0; 2182 2183 GLOBAL_STATE_CODE(); 2184 2185 /* 2186 * The bdrv queue is managed by record/replay; 2187 * creating a new flush request for stopping 2188 * the VM may break determinism. 2189 */ 2190 if (replay_events_enabled()) { 2191 return result; 2192 } 2193 2194 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 2195 AioContext *aio_context = bdrv_get_aio_context(bs); 2196 int ret; 2197 2198 aio_context_acquire(aio_context); 2199 ret = bdrv_flush(bs); 2200 if (ret < 0 && !result) { 2201 result = ret; 2202 } 2203 aio_context_release(aio_context); 2204 } 2205 2206 return result; 2207 } 2208 2209 /* 2210 * Returns the allocation status of the specified range. 2211 * Drivers not implementing the functionality are assumed to not support 2212 * backing files, hence all their sectors are reported as allocated. 2213 * 2214 * If 'want_zero' is true, the caller is querying for mapping 2215 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and 2216 * _ZERO where possible; otherwise, the result favors larger 'pnum', 2217 * with a focus on accurate BDRV_BLOCK_ALLOCATED. 2218 * 2219 * If 'offset' is beyond the end of the disk image the return value is 2220 * BDRV_BLOCK_EOF and 'pnum' is set to 0. 2221 * 2222 * 'bytes' is the max value 'pnum' should be set to. If bytes goes 2223 * beyond the end of the disk image it will be clamped; if 'pnum' is set to 2224 * the end of the image, then the returned value will include BDRV_BLOCK_EOF. 2225 * 2226 * 'pnum' is set to the number of bytes (including and immediately 2227 * following the specified offset) that are easily known to be in the 2228 * same allocated/unallocated state. Note that a second call starting 2229 * at the original offset plus the returned pnum may have the same status. 2230 * 'pnum' will be non-zero on success, except at end-of-file. 2231 * 2232 * Returns negative errno on failure. Otherwise, if the 2233 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are 2234 * set to the host mapping and BDS corresponding to the guest offset.
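* * As a usage sketch (hypothetical caller; 'end' stands for the image length, error handling and locking omitted), the whole image can be walked in maximal same-status chunks: * * int64_t cur, pnum; * for (cur = 0; cur < end; cur += pnum) { * int s = bdrv_co_block_status(bs, true, cur, end - cur, * &pnum, NULL, NULL); * if (s < 0) { * break; * } * // inspect the s flags for [cur, cur + pnum) * }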
2235 */ 2236 static int coroutine_fn GRAPH_RDLOCK 2237 bdrv_co_block_status(BlockDriverState *bs, bool want_zero, 2238 int64_t offset, int64_t bytes, 2239 int64_t *pnum, int64_t *map, BlockDriverState **file) 2240 { 2241 int64_t total_size; 2242 int64_t n; /* bytes */ 2243 int ret; 2244 int64_t local_map = 0; 2245 BlockDriverState *local_file = NULL; 2246 int64_t aligned_offset, aligned_bytes; 2247 uint32_t align; 2248 bool has_filtered_child; 2249 2250 assert(pnum); 2251 assert_bdrv_graph_readable(); 2252 *pnum = 0; 2253 total_size = bdrv_getlength(bs); 2254 if (total_size < 0) { 2255 ret = total_size; 2256 goto early_out; 2257 } 2258 2259 if (offset >= total_size) { 2260 ret = BDRV_BLOCK_EOF; 2261 goto early_out; 2262 } 2263 if (!bytes) { 2264 ret = 0; 2265 goto early_out; 2266 } 2267 2268 n = total_size - offset; 2269 if (n < bytes) { 2270 bytes = n; 2271 } 2272 2273 /* Must be non-NULL or bdrv_getlength() would have failed */ 2274 assert(bs->drv); 2275 has_filtered_child = bdrv_filter_child(bs); 2276 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2277 *pnum = bytes; 2278 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2279 if (offset + bytes == total_size) { 2280 ret |= BDRV_BLOCK_EOF; 2281 } 2282 if (bs->drv->protocol_name) { 2283 ret |= BDRV_BLOCK_OFFSET_VALID; 2284 local_map = offset; 2285 local_file = bs; 2286 } 2287 goto early_out; 2288 } 2289 2290 bdrv_inc_in_flight(bs); 2291 2292 /* Round out to request_alignment boundaries */ 2293 align = bs->bl.request_alignment; 2294 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2295 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2296 2297 if (bs->drv->bdrv_co_block_status) { 2298 /* 2299 * Use the block-status cache only for protocol nodes: Format 2300 * drivers are generally quick to inquire the status, but protocol 2301 * drivers often need to get information from outside of qemu, so 2302 * we do not have control over the actual implementation. There 2303 * have been cases where inquiring the status took an unreasonably 2304 * long time, and we can do nothing in qemu to fix it. 2305 * This is especially problematic for images with large data areas, 2306 * because finding the few holes in them and giving them special 2307 * treatment does not gain much performance. Therefore, we try to 2308 * cache the last-identified data region. 2309 * 2310 * Second, limiting ourselves to protocol nodes allows us to assume 2311 * the block status for data regions to be DATA | OFFSET_VALID, and 2312 * that the host offset is the same as the guest offset. 2313 * 2314 * Note that it is possible that external writers zero parts of 2315 * the cached regions without the cache being invalidated, and so 2316 * we may report zeroes as data. This is not catastrophic, 2317 * however, because reporting zeroes as data is fine. 2318 */ 2319 if (QLIST_EMPTY(&bs->children) && 2320 bdrv_bsc_is_data(bs, aligned_offset, pnum)) 2321 { 2322 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 2323 local_file = bs; 2324 local_map = aligned_offset; 2325 } else { 2326 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2327 aligned_bytes, pnum, &local_map, 2328 &local_file); 2329 2330 /* 2331 * Note that checking QLIST_EMPTY(&bs->children) is also done when 2332 * the cache is queried above. Technically, we do not need to check 2333 * it here; the worst that can happen is that we fill the cache for 2334 * non-protocol nodes, and then it is never used. 
However, filling 2335 * the cache requires an RCU update, so double check here to avoid 2336 * such an update if possible. 2337 * 2338 * Check want_zero, because we only want to update the cache when we 2339 * have accurate information about what is zero and what is data. 2340 */ 2341 if (want_zero && 2342 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) && 2343 QLIST_EMPTY(&bs->children)) 2344 { 2345 /* 2346 * When a protocol driver reports BLOCK_OFFSET_VALID, the 2347 * returned local_map value must be the same as the offset we 2348 * have passed (aligned_offset), and local_file must be the node 2349 * itself. 2350 * Assert this, because we follow this rule when reading from 2351 * the cache (see the `local_file = bs` and 2352 * `local_map = aligned_offset` assignments above), and the 2353 * result the cache delivers must be the same as the driver 2354 * would deliver. 2355 */ 2356 assert(local_file == bs); 2357 assert(local_map == aligned_offset); 2358 bdrv_bsc_fill(bs, aligned_offset, *pnum); 2359 } 2360 } 2361 } else { 2362 /* Default code for filters */ 2363 2364 local_file = bdrv_filter_bs(bs); 2365 assert(local_file); 2366 2367 *pnum = aligned_bytes; 2368 local_map = aligned_offset; 2369 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2370 } 2371 if (ret < 0) { 2372 *pnum = 0; 2373 goto out; 2374 } 2375 2376 /* 2377 * The driver's result must be a non-zero multiple of request_alignment. 2378 * Clamp pnum and adjust map to the original request. 2379 */ 2380 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2381 align > offset - aligned_offset); 2382 if (ret & BDRV_BLOCK_RECURSE) { 2383 assert(ret & BDRV_BLOCK_DATA); 2384 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2385 assert(!(ret & BDRV_BLOCK_ZERO)); 2386 } 2387 2388 *pnum -= offset - aligned_offset; 2389 if (*pnum > bytes) { 2390 *pnum = bytes; 2391 } 2392 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2393 local_map += offset - aligned_offset; 2394 } 2395 2396 if (ret & BDRV_BLOCK_RAW) { 2397 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2398 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2399 *pnum, pnum, &local_map, &local_file); 2400 goto out; 2401 } 2402 2403 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2404 ret |= BDRV_BLOCK_ALLOCATED; 2405 } else if (bs->drv->supports_backing) { 2406 BlockDriverState *cow_bs = bdrv_cow_bs(bs); 2407 2408 if (!cow_bs) { 2409 ret |= BDRV_BLOCK_ZERO; 2410 } else if (want_zero) { 2411 int64_t size2 = bdrv_getlength(cow_bs); 2412 2413 if (size2 >= 0 && offset >= size2) { 2414 ret |= BDRV_BLOCK_ZERO; 2415 } 2416 } 2417 } 2418 2419 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2420 local_file && local_file != bs && 2421 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2422 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2423 int64_t file_pnum; 2424 int ret2; 2425 2426 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2427 *pnum, &file_pnum, NULL, NULL); 2428 if (ret2 >= 0) { 2429 /* Ignore errors. This is just providing extra information; it 2430 * is useful but not necessary. 2431 */ 2432 if (ret2 & BDRV_BLOCK_EOF && 2433 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2434 /* 2435 * It is valid for the format block driver to read 2436 * beyond the end of the underlying file's current 2437 * size; such areas read as zero.
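* For example (illustrative sizes): a 1 MiB qcow2 image whose protocol file is currently only 512 KiB long may still map clusters past the 512 KiB mark; reading them yields zeroes, hence BDRV_BLOCK_ZERO is set below.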
2438 */ 2439 ret |= BDRV_BLOCK_ZERO; 2440 } else { 2441 /* Limit request to the range reported by the protocol driver */ 2442 *pnum = file_pnum; 2443 ret |= (ret2 & BDRV_BLOCK_ZERO); 2444 } 2445 } 2446 } 2447 2448 out: 2449 bdrv_dec_in_flight(bs); 2450 if (ret >= 0 && offset + *pnum == total_size) { 2451 ret |= BDRV_BLOCK_EOF; 2452 } 2453 early_out: 2454 if (file) { 2455 *file = local_file; 2456 } 2457 if (map) { 2458 *map = local_map; 2459 } 2460 return ret; 2461 } 2462 2463 int coroutine_fn 2464 bdrv_co_common_block_status_above(BlockDriverState *bs, 2465 BlockDriverState *base, 2466 bool include_base, 2467 bool want_zero, 2468 int64_t offset, 2469 int64_t bytes, 2470 int64_t *pnum, 2471 int64_t *map, 2472 BlockDriverState **file, 2473 int *depth) 2474 { 2475 int ret; 2476 BlockDriverState *p; 2477 int64_t eof = 0; 2478 int dummy; 2479 IO_CODE(); 2480 2481 assert(!include_base || base); /* Can't include NULL base */ 2482 assert_bdrv_graph_readable(); 2483 2484 if (!depth) { 2485 depth = &dummy; 2486 } 2487 *depth = 0; 2488 2489 if (!include_base && bs == base) { 2490 *pnum = bytes; 2491 return 0; 2492 } 2493 2494 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file); 2495 ++*depth; 2496 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { 2497 return ret; 2498 } 2499 2500 if (ret & BDRV_BLOCK_EOF) { 2501 eof = offset + *pnum; 2502 } 2503 2504 assert(*pnum <= bytes); 2505 bytes = *pnum; 2506 2507 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; 2508 p = bdrv_filter_or_cow_bs(p)) 2509 { 2510 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, 2511 file); 2512 ++*depth; 2513 if (ret < 0) { 2514 return ret; 2515 } 2516 if (*pnum == 0) { 2517 /* 2518 * The top layer deferred to this layer, and because this layer is 2519 * short, any zeroes that we synthesize beyond EOF behave as if they 2520 * were allocated at this layer. 2521 * 2522 * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be 2523 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2524 * below. 2525 */ 2526 assert(ret & BDRV_BLOCK_EOF); 2527 *pnum = bytes; 2528 if (file) { 2529 *file = p; 2530 } 2531 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED; 2532 break; 2533 } 2534 if (ret & BDRV_BLOCK_ALLOCATED) { 2535 /* 2536 * We've found the node and the status, we must break. 2537 * 2538 * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be 2539 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see 2540 * below. 2541 */ 2542 ret &= ~BDRV_BLOCK_EOF; 2543 break; 2544 } 2545 2546 if (p == base) { 2547 assert(include_base); 2548 break; 2549 } 2550 2551 /* 2552 * OK, [offset, offset + *pnum) region is unallocated on this layer, 2553 * let's continue the diving. 
2554 */ 2555 assert(*pnum <= bytes); 2556 bytes = *pnum; 2557 } 2558 2559 if (offset + *pnum == eof) { 2560 ret |= BDRV_BLOCK_EOF; 2561 } 2562 2563 return ret; 2564 } 2565 2566 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2567 BlockDriverState *base, 2568 int64_t offset, int64_t bytes, 2569 int64_t *pnum, int64_t *map, 2570 BlockDriverState **file) 2571 { 2572 IO_CODE(); 2573 return bdrv_co_common_block_status_above(bs, base, false, true, offset, 2574 bytes, pnum, map, file, NULL); 2575 } 2576 2577 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2578 int64_t offset, int64_t bytes, int64_t *pnum, 2579 int64_t *map, BlockDriverState **file) 2580 { 2581 IO_CODE(); 2582 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, 2583 pnum, map, file, NULL); 2584 } 2585 2586 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2587 int64_t *pnum, int64_t *map, BlockDriverState **file) 2588 { 2589 IO_CODE(); 2590 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2591 offset, bytes, pnum, map, file); 2592 } 2593 2594 /* 2595 * Check @bs (and its backing chain) to see if the range defined 2596 * by @offset and @bytes is known to read as zeroes. 2597 * Return 1 if that is the case, 0 otherwise and -errno on error. 2598 * This test is meant to be fast rather than accurate so returning 0 2599 * does not guarantee non-zero data. 2600 */ 2601 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2602 int64_t bytes) 2603 { 2604 int ret; 2605 int64_t pnum = bytes; 2606 IO_CODE(); 2607 2608 if (!bytes) { 2609 return 1; 2610 } 2611 2612 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, 2613 bytes, &pnum, NULL, NULL, NULL); 2614 2615 if (ret < 0) { 2616 return ret; 2617 } 2618 2619 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2620 } 2621 2622 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, 2623 int64_t bytes, int64_t *pnum) 2624 { 2625 int ret; 2626 int64_t dummy; 2627 IO_CODE(); 2628 2629 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, 2630 bytes, pnum ? pnum : &dummy, NULL, 2631 NULL, NULL); 2632 if (ret < 0) { 2633 return ret; 2634 } 2635 return !!(ret & BDRV_BLOCK_ALLOCATED); 2636 } 2637 2638 int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, 2639 int64_t *pnum) 2640 { 2641 int ret; 2642 int64_t dummy; 2643 IO_CODE(); 2644 2645 ret = bdrv_common_block_status_above(bs, bs, true, false, offset, 2646 bytes, pnum ? pnum : &dummy, NULL, 2647 NULL, NULL); 2648 if (ret < 0) { 2649 return ret; 2650 } 2651 return !!(ret & BDRV_BLOCK_ALLOCATED); 2652 } 2653 2654 /* See bdrv_is_allocated_above for documentation */ 2655 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, 2656 BlockDriverState *base, 2657 bool include_base, int64_t offset, 2658 int64_t bytes, int64_t *pnum) 2659 { 2660 int depth; 2661 int ret; 2662 IO_CODE(); 2663 2664 ret = bdrv_co_common_block_status_above(top, base, include_base, false, 2665 offset, bytes, pnum, NULL, NULL, 2666 &depth); 2667 if (ret < 0) { 2668 return ret; 2669 } 2670 2671 if (ret & BDRV_BLOCK_ALLOCATED) { 2672 return depth; 2673 } 2674 return 0; 2675 } 2676 2677 /* 2678 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2679 * 2680 * Return a positive depth if (a prefix of) the given range is allocated 2681 * in any image between BASE and TOP (BASE is only included if include_base 2682 * is set). 
Depth 1 is TOP, 2 is the first backing layer, and so forth. 2683 * BASE can be NULL to check if the given offset is allocated in any 2684 * image of the chain. Return 0 otherwise, or negative errno on 2685 * failure. 2686 * 2687 * 'pnum' is set to the number of bytes (including and immediately 2688 * following the specified offset) that are known to be in the same 2689 * allocated/unallocated state. Note that a subsequent call starting 2690 * at 'offset + *pnum' may return the same allocation status (in other 2691 * words, the result is not necessarily the maximum possible range); 2692 * but 'pnum' will only be 0 when end of file is reached. 2693 */ 2694 int bdrv_is_allocated_above(BlockDriverState *top, 2695 BlockDriverState *base, 2696 bool include_base, int64_t offset, 2697 int64_t bytes, int64_t *pnum) 2698 { 2699 int depth; 2700 int ret; 2701 IO_CODE(); 2702 2703 ret = bdrv_common_block_status_above(top, base, include_base, false, 2704 offset, bytes, pnum, NULL, NULL, 2705 &depth); 2706 if (ret < 0) { 2707 return ret; 2708 } 2709 2710 if (ret & BDRV_BLOCK_ALLOCATED) { 2711 return depth; 2712 } 2713 return 0; 2714 } 2715 2716 int coroutine_fn 2717 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2718 { 2719 BlockDriver *drv = bs->drv; 2720 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2721 int ret; 2722 IO_CODE(); 2723 assert_bdrv_graph_readable(); 2724 2725 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2726 if (ret < 0) { 2727 return ret; 2728 } 2729 2730 if (!drv) { 2731 return -ENOMEDIUM; 2732 } 2733 2734 bdrv_inc_in_flight(bs); 2735 2736 if (drv->bdrv_co_load_vmstate) { 2737 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos); 2738 } else if (child_bs) { 2739 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2740 } else { 2741 ret = -ENOTSUP; 2742 } 2743 2744 bdrv_dec_in_flight(bs); 2745 2746 return ret; 2747 } 2748 2749 int coroutine_fn 2750 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2751 { 2752 BlockDriver *drv = bs->drv; 2753 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2754 int ret; 2755 IO_CODE(); 2756 assert_bdrv_graph_readable(); 2757 2758 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2759 if (ret < 0) { 2760 return ret; 2761 } 2762 2763 if (!drv) { 2764 return -ENOMEDIUM; 2765 } 2766 2767 bdrv_inc_in_flight(bs); 2768 2769 if (drv->bdrv_co_save_vmstate) { 2770 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos); 2771 } else if (child_bs) { 2772 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2773 } else { 2774 ret = -ENOTSUP; 2775 } 2776 2777 bdrv_dec_in_flight(bs); 2778 2779 return ret; 2780 } 2781 2782 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2783 int64_t pos, int size) 2784 { 2785 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2786 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2787 IO_CODE(); 2788 2789 return ret < 0 ? ret : size; 2790 } 2791 2792 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2793 int64_t pos, int size) 2794 { 2795 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2796 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2797 IO_CODE(); 2798 2799 return ret < 0 ? 
ret : size; 2800 } 2801 2802 /**************************************************************/ 2803 /* async I/Os */ 2804 2805 void bdrv_aio_cancel(BlockAIOCB *acb) 2806 { 2807 IO_CODE(); 2808 qemu_aio_ref(acb); 2809 bdrv_aio_cancel_async(acb); 2810 while (acb->refcnt > 1) { 2811 if (acb->aiocb_info->get_aio_context) { 2812 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2813 } else if (acb->bs) { 2814 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2815 * assert that we're not using an I/O thread. Thread-safe 2816 * code should use bdrv_aio_cancel_async exclusively. 2817 */ 2818 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2819 aio_poll(bdrv_get_aio_context(acb->bs), true); 2820 } else { 2821 abort(); 2822 } 2823 } 2824 qemu_aio_unref(acb); 2825 } 2826 2827 /* Async version of aio cancel. The caller is not blocked if the acb implements 2828 * cancel_async; otherwise we do nothing and let the request complete normally. 2829 * In either case the completion callback must be called. */ 2830 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2831 { 2832 IO_CODE(); 2833 if (acb->aiocb_info->cancel_async) { 2834 acb->aiocb_info->cancel_async(acb); 2835 } 2836 } 2837 2838 /**************************************************************/ 2839 /* Coroutine block device emulation */ 2840 2841 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2842 { 2843 BdrvChild *primary_child = bdrv_primary_child(bs); 2844 BdrvChild *child; 2845 int current_gen; 2846 int ret = 0; 2847 IO_CODE(); 2848 2849 assert_bdrv_graph_readable(); 2850 bdrv_inc_in_flight(bs); 2851 2852 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) || 2853 bdrv_is_sg(bs)) { 2854 goto early_exit; 2855 } 2856 2857 qemu_co_mutex_lock(&bs->reqs_lock); 2858 current_gen = qatomic_read(&bs->write_gen); 2859 2860 /* Wait until any previous flushes are completed */ 2861 while (bs->active_flush_req) { 2862 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2863 } 2864 2865 /* Flushes reach this point in nondecreasing current_gen order.
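(Each flush samples current_gen under reqs_lock, and any flush that sampled a newer generation waits on flush_queue until we finish, so flushed_gen below can only grow.)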
*/ 2866 bs->active_flush_req = true; 2867 qemu_co_mutex_unlock(&bs->reqs_lock); 2868 2869 /* Write back all layers by calling one driver function */ 2870 if (bs->drv->bdrv_co_flush) { 2871 ret = bs->drv->bdrv_co_flush(bs); 2872 goto out; 2873 } 2874 2875 /* Write back cached data to the OS even with cache=unsafe */ 2876 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); 2877 if (bs->drv->bdrv_co_flush_to_os) { 2878 ret = bs->drv->bdrv_co_flush_to_os(bs); 2879 if (ret < 0) { 2880 goto out; 2881 } 2882 } 2883 2884 /* But don't actually force it to the disk with cache=unsafe */ 2885 if (bs->open_flags & BDRV_O_NO_FLUSH) { 2886 goto flush_children; 2887 } 2888 2889 /* Check if we really need to flush anything */ 2890 if (bs->flushed_gen == current_gen) { 2891 goto flush_children; 2892 } 2893 2894 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); 2895 if (!bs->drv) { 2896 /* bs->drv->bdrv_co_flush_to_os() might have ejected the BDS 2897 * (even in case of apparent success) */ 2898 ret = -ENOMEDIUM; 2899 goto out; 2900 } 2901 if (bs->drv->bdrv_co_flush_to_disk) { 2902 ret = bs->drv->bdrv_co_flush_to_disk(bs); 2903 } else if (bs->drv->bdrv_aio_flush) { 2904 BlockAIOCB *acb; 2905 CoroutineIOCompletion co = { 2906 .coroutine = qemu_coroutine_self(), 2907 }; 2908 2909 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 2910 if (acb == NULL) { 2911 ret = -EIO; 2912 } else { 2913 qemu_coroutine_yield(); 2914 ret = co.ret; 2915 } 2916 } else { 2917 /* 2918 * Some block drivers always operate in either writethrough or unsafe 2919 * mode and therefore don't support bdrv_flush. Usually qemu doesn't 2920 * know how the server works (because the behaviour is hardcoded or 2921 * depends on server-side configuration), so we can't ensure that 2922 * everything is safe on disk. Returning an error doesn't work because 2923 * that would break guests even if the server operates in writethrough 2924 * mode. 2925 * 2926 * Let's hope the user knows what they're doing. 2927 */ 2928 ret = 0; 2929 } 2930 2931 if (ret < 0) { 2932 goto out; 2933 } 2934 2935 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 2936 * in the case of cache=unsafe, so there are no useless flushes. 2937 */ 2938 flush_children: 2939 ret = 0; 2940 QLIST_FOREACH(child, &bs->children, next) { 2941 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 2942 int this_child_ret = bdrv_co_flush(child->bs); 2943 if (!ret) { 2944 ret = this_child_ret; 2945 } 2946 } 2947 } 2948 2949 out: 2950 /* Notify any pending flushes that we have completed */ 2951 if (ret == 0) { 2952 bs->flushed_gen = current_gen; 2953 } 2954 2955 qemu_co_mutex_lock(&bs->reqs_lock); 2956 bs->active_flush_req = false; 2957 /* Return value is ignored - it's ok if wait queue is empty */ 2958 qemu_co_queue_next(&bs->flush_queue); 2959 qemu_co_mutex_unlock(&bs->reqs_lock); 2960 2961 early_exit: 2962 bdrv_dec_in_flight(bs); 2963 return ret; 2964 } 2965 2966 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 2967 int64_t bytes) 2968 { 2969 BdrvTrackedRequest req; 2970 int ret; 2971 int64_t max_pdiscard; 2972 int head, tail, align; 2973 BlockDriverState *bs = child->bs; 2974 IO_CODE(); 2975 assert_bdrv_graph_readable(); 2976 2977 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { 2978 return -ENOMEDIUM; 2979 } 2980 2981 if (bdrv_has_readonly_bitmaps(bs)) { 2982 return -EPERM; 2983 } 2984 2985 ret = bdrv_check_request(offset, bytes, NULL); 2986 if (ret < 0) { 2987 return ret; 2988 } 2989 2990 /* Do nothing if disabled.
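Returning success is correct here: discard is purely advisory, so a request that is not passed down is indistinguishable from one the device ignored.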
*/ 2991 if (!(bs->open_flags & BDRV_O_UNMAP)) { 2992 return 0; 2993 } 2994 2995 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 2996 return 0; 2997 } 2998 2999 /* Invalidate the cached block-status data range if this discard overlaps */ 3000 bdrv_bsc_invalidate_range(bs, offset, bytes); 3001 3002 /* Discard is advisory, but some devices track and coalesce 3003 * unaligned requests, so we must pass everything down rather than 3004 * round here. Still, most devices will just refuse unaligned 3005 * requests with -ENOTSUP (the discard is then simply not performed), 3006 * so we must fragment the request accordingly. */ 3007 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 3008 assert(align % bs->bl.request_alignment == 0); 3009 head = offset % align; 3010 tail = (offset + bytes) % align; 3011 3012 bdrv_inc_in_flight(bs); 3013 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 3014 3015 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 3016 if (ret < 0) { 3017 goto out; 3018 } 3019 3020 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), 3021 align); 3022 assert(max_pdiscard >= bs->bl.request_alignment); 3023 3024 while (bytes > 0) { 3025 int64_t num = bytes; 3026 3027 if (head) { 3028 /* Make small requests to get to alignment boundaries. */ 3029 num = MIN(bytes, align - head); 3030 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 3031 num %= bs->bl.request_alignment; 3032 } 3033 head = (head + num) % align; 3034 assert(num < max_pdiscard); 3035 } else if (tail) { 3036 if (num > align) { 3037 /* Shorten the request to the last aligned cluster. */ 3038 num -= tail; 3039 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 3040 tail > bs->bl.request_alignment) { 3041 tail %= bs->bl.request_alignment; 3042 num -= tail; 3043 } 3044 } 3045 /* limit request size */ 3046 if (num > max_pdiscard) { 3047 num = max_pdiscard; 3048 } 3049 3050 if (!bs->drv) { 3051 ret = -ENOMEDIUM; 3052 goto out; 3053 } 3054 if (bs->drv->bdrv_co_pdiscard) { 3055 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 3056 } else { 3057 BlockAIOCB *acb; 3058 CoroutineIOCompletion co = { 3059 .coroutine = qemu_coroutine_self(), 3060 }; 3061 3062 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 3063 bdrv_co_io_em_complete, &co); 3064 if (acb == NULL) { 3065 ret = -EIO; 3066 goto out; 3067 } else { 3068 qemu_coroutine_yield(); 3069 ret = co.ret; 3070 } 3071 } 3072 if (ret && ret != -ENOTSUP) { 3073 goto out; 3074 } 3075 3076 offset += num; 3077 bytes -= num; 3078 } 3079 ret = 0; 3080 out: 3081 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 3082 tracked_request_end(&req); 3083 bdrv_dec_in_flight(bs); 3084 return ret; 3085 } 3086 3087 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3088 { 3089 BlockDriver *drv = bs->drv; 3090 CoroutineIOCompletion co = { 3091 .coroutine = qemu_coroutine_self(), 3092 }; 3093 BlockAIOCB *acb; 3094 IO_CODE(); 3095 assert_bdrv_graph_readable(); 3096 3097 bdrv_inc_in_flight(bs); 3098 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3099 co.ret = -ENOTSUP; 3100 goto out; 3101 } 3102 3103 if (drv->bdrv_co_ioctl) { 3104 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3105 } else { 3106 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3107 if (!acb) { 3108 co.ret = -ENOTSUP; 3109 goto out; 3110 } 3111 qemu_coroutine_yield(); 3112 } 3113 out: 3114 bdrv_dec_in_flight(bs); 3115 return co.ret; 3116 } 3117 3118 void *qemu_blockalign(BlockDriverState *bs,
size_t size) 3119 { 3120 IO_CODE(); 3121 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3122 } 3123 3124 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3125 { 3126 IO_CODE(); 3127 return memset(qemu_blockalign(bs, size), 0, size); 3128 } 3129 3130 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3131 { 3132 size_t align = bdrv_opt_mem_align(bs); 3133 IO_CODE(); 3134 3135 /* Ensure that NULL is never returned on success */ 3136 assert(align > 0); 3137 if (size == 0) { 3138 size = align; 3139 } 3140 3141 return qemu_try_memalign(align, size); 3142 } 3143 3144 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3145 { 3146 void *mem = qemu_try_blockalign(bs, size); 3147 IO_CODE(); 3148 3149 if (mem) { 3150 memset(mem, 0, size); 3151 } 3152 3153 return mem; 3154 } 3155 3156 void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs) 3157 { 3158 BdrvChild *child; 3159 IO_CODE(); 3160 assert_bdrv_graph_readable(); 3161 3162 QLIST_FOREACH(child, &bs->children, next) { 3163 bdrv_co_io_plug(child->bs); 3164 } 3165 3166 if (qatomic_fetch_inc(&bs->io_plugged) == 0) { 3167 BlockDriver *drv = bs->drv; 3168 if (drv && drv->bdrv_co_io_plug) { 3169 drv->bdrv_co_io_plug(bs); 3170 } 3171 } 3172 } 3173 3174 void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs) 3175 { 3176 BdrvChild *child; 3177 IO_CODE(); 3178 assert_bdrv_graph_readable(); 3179 3180 assert(bs->io_plugged); 3181 if (qatomic_fetch_dec(&bs->io_plugged) == 1) { 3182 BlockDriver *drv = bs->drv; 3183 if (drv && drv->bdrv_co_io_unplug) { 3184 drv->bdrv_co_io_unplug(bs); 3185 } 3186 } 3187 3188 QLIST_FOREACH(child, &bs->children, next) { 3189 bdrv_co_io_unplug(child->bs); 3190 } 3191 } 3192 3193 /* Helper that undoes bdrv_register_buf() when it fails partway through */ 3194 static void GRAPH_RDLOCK 3195 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size, 3196 BdrvChild *final_child) 3197 { 3198 BdrvChild *child; 3199 3200 GLOBAL_STATE_CODE(); 3201 assert_bdrv_graph_readable(); 3202 3203 QLIST_FOREACH(child, &bs->children, next) { 3204 if (child == final_child) { 3205 break; 3206 } 3207 3208 bdrv_unregister_buf(child->bs, host, size); 3209 } 3210 3211 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3212 bs->drv->bdrv_unregister_buf(bs, host, size); 3213 } 3214 } 3215 3216 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, 3217 Error **errp) 3218 { 3219 BdrvChild *child; 3220 3221 GLOBAL_STATE_CODE(); 3222 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3223 3224 if (bs->drv && bs->drv->bdrv_register_buf) { 3225 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { 3226 return false; 3227 } 3228 } 3229 QLIST_FOREACH(child, &bs->children, next) { 3230 if (!bdrv_register_buf(child->bs, host, size, errp)) { 3231 bdrv_register_buf_rollback(bs, host, size, child); 3232 return false; 3233 } 3234 } 3235 return true; 3236 } 3237 3238 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size) 3239 { 3240 BdrvChild *child; 3241 3242 GLOBAL_STATE_CODE(); 3243 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3244 3245 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3246 bs->drv->bdrv_unregister_buf(bs, host, size); 3247 } 3248 QLIST_FOREACH(child, &bs->children, next) { 3249 bdrv_unregister_buf(child->bs, host, size); 3250 } 3251 } 3252 3253 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal( 3254 BdrvChild *src, int64_t src_offset, BdrvChild *dst, 3255 int64_t dst_offset, int64_t bytes, 3256 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3257 bool recurse_src) 3258 { 3259 
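/* Common implementation of bdrv_co_copy_range_from() and bdrv_co_copy_range_to(): @recurse_src selects whether the request is tracked as a read on @src (the _from direction) or as a write on @dst (the _to direction). */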
BdrvTrackedRequest req; 3260 int ret; 3261 assert_bdrv_graph_readable(); 3262 3263 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3264 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3265 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3266 assert(!(read_flags & BDRV_REQ_NO_WAIT)); 3267 assert(!(write_flags & BDRV_REQ_NO_WAIT)); 3268 3269 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { 3270 return -ENOMEDIUM; 3271 } 3272 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0); 3273 if (ret) { 3274 return ret; 3275 } 3276 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3277 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3278 } 3279 3280 if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) { 3281 return -ENOMEDIUM; 3282 } 3283 ret = bdrv_check_request32(src_offset, bytes, NULL, 0); 3284 if (ret) { 3285 return ret; 3286 } 3287 3288 if (!src->bs->drv->bdrv_co_copy_range_from 3289 || !dst->bs->drv->bdrv_co_copy_range_to 3290 || src->bs->encrypted || dst->bs->encrypted) { 3291 return -ENOTSUP; 3292 } 3293 3294 if (recurse_src) { 3295 bdrv_inc_in_flight(src->bs); 3296 tracked_request_begin(&req, src->bs, src_offset, bytes, 3297 BDRV_TRACKED_READ); 3298 3299 /* BDRV_REQ_SERIALISING is only for write operations */ 3300 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3301 bdrv_wait_serialising_requests(&req); 3302 3303 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3304 src, src_offset, 3305 dst, dst_offset, 3306 bytes, 3307 read_flags, write_flags); 3308 3309 tracked_request_end(&req); 3310 bdrv_dec_in_flight(src->bs); 3311 } else { 3312 bdrv_inc_in_flight(dst->bs); 3313 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3314 BDRV_TRACKED_WRITE); 3315 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3316 write_flags); 3317 if (!ret) { 3318 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3319 src, src_offset, 3320 dst, dst_offset, 3321 bytes, 3322 read_flags, write_flags); 3323 } 3324 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3325 tracked_request_end(&req); 3326 bdrv_dec_in_flight(dst->bs); 3327 } 3328 3329 return ret; 3330 } 3331 3332 /* Copy range from @src to @dst. 3333 * 3334 * See the comment of bdrv_co_copy_range for the parameter and return value 3335 * semantics. */ 3336 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, 3337 BdrvChild *dst, int64_t dst_offset, 3338 int64_t bytes, 3339 BdrvRequestFlags read_flags, 3340 BdrvRequestFlags write_flags) 3341 { 3342 IO_CODE(); 3343 assert_bdrv_graph_readable(); 3344 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3345 read_flags, write_flags); 3346 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3347 bytes, read_flags, write_flags, true); 3348 } 3349 3350 /* Copy range from @src to @dst. 3351 * 3352 * See the comment of bdrv_co_copy_range for the parameter and return value 3353 * semantics.
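* * A hypothetical call (offsets and flags illustrative) copying the first MiB between two BdrvChild pointers held by the caller: * * ret = bdrv_co_copy_range_to(src, 0, dst, 0, 1 << 20, 0, 0);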
*/ 3354 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, 3355 BdrvChild *dst, int64_t dst_offset, 3356 int64_t bytes, 3357 BdrvRequestFlags read_flags, 3358 BdrvRequestFlags write_flags) 3359 { 3360 IO_CODE(); 3361 assert_bdrv_graph_readable(); 3362 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3363 read_flags, write_flags); 3364 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3365 bytes, read_flags, write_flags, false); 3366 } 3367 3368 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, 3369 BdrvChild *dst, int64_t dst_offset, 3370 int64_t bytes, BdrvRequestFlags read_flags, 3371 BdrvRequestFlags write_flags) 3372 { 3373 IO_CODE(); 3374 assert_bdrv_graph_readable(); 3375 3376 return bdrv_co_copy_range_from(src, src_offset, 3377 dst, dst_offset, 3378 bytes, read_flags, write_flags); 3379 } 3380 3381 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3382 { 3383 BdrvChild *c; 3384 QLIST_FOREACH(c, &bs->parents, next_parent) { 3385 if (c->klass->resize) { 3386 c->klass->resize(c); 3387 } 3388 } 3389 } 3390 3391 /** 3392 * Truncate file to 'offset' bytes (needed only for file protocols) 3393 * 3394 * If 'exact' is true, the file must be resized to exactly the given 3395 * 'offset'. Otherwise, it is sufficient for the node to be at least 3396 * 'offset' bytes in length. 3397 */ 3398 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3399 PreallocMode prealloc, BdrvRequestFlags flags, 3400 Error **errp) 3401 { 3402 BlockDriverState *bs = child->bs; 3403 BdrvChild *filtered, *backing; 3404 BlockDriver *drv = bs->drv; 3405 BdrvTrackedRequest req; 3406 int64_t old_size, new_bytes; 3407 int ret; 3408 IO_CODE(); 3409 assert_bdrv_graph_readable(); 3410 3411 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3412 if (!drv) { 3413 error_setg(errp, "No medium inserted"); 3414 return -ENOMEDIUM; 3415 } 3416 if (offset < 0) { 3417 error_setg(errp, "Image size cannot be negative"); 3418 return -EINVAL; 3419 } 3420 3421 ret = bdrv_check_request(offset, 0, errp); 3422 if (ret < 0) { 3423 return ret; 3424 } 3425 3426 old_size = bdrv_getlength(bs); 3427 if (old_size < 0) { 3428 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3429 return old_size; 3430 } 3431 3432 if (bdrv_is_read_only(bs)) { 3433 error_setg(errp, "Image is read-only"); 3434 return -EACCES; 3435 } 3436 3437 if (offset > old_size) { 3438 new_bytes = offset - old_size; 3439 } else { 3440 new_bytes = 0; 3441 } 3442 3443 bdrv_inc_in_flight(bs); 3444 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3445 BDRV_TRACKED_TRUNCATE); 3446 3447 /* If we are growing the image and potentially using preallocation for the 3448 * new area, we need to make sure that no write requests are made to it 3449 * concurrently or they might be overwritten by preallocation. */ 3450 if (new_bytes) { 3451 bdrv_make_request_serialising(&req, 1); 3452 } 3453 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3454 0); 3455 if (ret < 0) { 3456 error_setg_errno(errp, -ret, 3457 "Failed to prepare request for truncation"); 3458 goto out; 3459 } 3460 3461 filtered = bdrv_filter_child(bs); 3462 backing = bdrv_cow_child(bs); 3463 3464 /* 3465 * If the image has a backing file that is large enough that it would 3466 * provide data for the new area, we cannot leave it unallocated because 3467 * then the backing file content would become visible. Instead, zero-fill 3468 * the new area. 
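* For example (illustrative sizes): old_size = 1 MiB, backing file length = 2 MiB, new size = 3 MiB. If [1 MiB, 2 MiB) were left unallocated, stale backing data would show through, so BDRV_REQ_ZERO_WRITE is added below to zero the whole new area.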
3469 * 3470 * Note that if the image has a backing file but was opened without it, 3471 * keeping the content consistent with that backing 3472 * file is the user's responsibility. 3473 */ 3474 if (new_bytes && backing) { 3475 int64_t backing_len; 3476 3477 backing_len = bdrv_co_getlength(backing->bs); 3478 if (backing_len < 0) { 3479 ret = backing_len; 3480 error_setg_errno(errp, -ret, "Could not get backing file size"); 3481 goto out; 3482 } 3483 3484 if (backing_len > old_size) { 3485 flags |= BDRV_REQ_ZERO_WRITE; 3486 } 3487 } 3488 3489 if (drv->bdrv_co_truncate) { 3490 if (flags & ~bs->supported_truncate_flags) { 3491 error_setg(errp, "Block driver does not support requested flags"); 3492 ret = -ENOTSUP; 3493 goto out; 3494 } 3495 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3496 } else if (filtered) { 3497 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3498 } else { 3499 error_setg(errp, "Image format driver does not support resize"); 3500 ret = -ENOTSUP; 3501 goto out; 3502 } 3503 if (ret < 0) { 3504 goto out; 3505 } 3506 3507 ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3508 if (ret < 0) { 3509 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3510 } else { 3511 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3512 } 3513 /* 3514 * It's possible that truncation succeeded but bdrv_co_refresh_total_sectors() 3515 * failed, but the latter doesn't affect how we should finish the request. 3516 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. 3517 */ 3518 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3519 3520 out: 3521 tracked_request_end(&req); 3522 bdrv_dec_in_flight(bs); 3523 3524 return ret; 3525 } 3526 3527 void bdrv_cancel_in_flight(BlockDriverState *bs) 3528 { 3529 GLOBAL_STATE_CODE(); 3530 if (!bs || !bs->drv) { 3531 return; 3532 } 3533 3534 if (bs->drv->bdrv_cancel_in_flight) { 3535 bs->drv->bdrv_cancel_in_flight(bs); 3536 } 3537 } 3538 3539 int coroutine_fn 3540 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, 3541 QEMUIOVector *qiov, size_t qiov_offset) 3542 { 3543 BlockDriverState *bs = child->bs; 3544 BlockDriver *drv = bs->drv; 3545 int ret; 3546 IO_CODE(); 3547 assert_bdrv_graph_readable(); 3548 3549 if (!drv) { 3550 return -ENOMEDIUM; 3551 } 3552 3553 if (!drv->bdrv_co_preadv_snapshot) { 3554 return -ENOTSUP; 3555 } 3556 3557 bdrv_inc_in_flight(bs); 3558 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); 3559 bdrv_dec_in_flight(bs); 3560 3561 return ret; 3562 } 3563 3564 int coroutine_fn 3565 bdrv_co_snapshot_block_status(BlockDriverState *bs, 3566 bool want_zero, int64_t offset, int64_t bytes, 3567 int64_t *pnum, int64_t *map, 3568 BlockDriverState **file) 3569 { 3570 BlockDriver *drv = bs->drv; 3571 int ret; 3572 IO_CODE(); 3573 assert_bdrv_graph_readable(); 3574 3575 if (!drv) { 3576 return -ENOMEDIUM; 3577 } 3578 3579 if (!drv->bdrv_co_snapshot_block_status) { 3580 return -ENOTSUP; 3581 } 3582 3583 bdrv_inc_in_flight(bs); 3584 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, 3585 pnum, map, file); 3586 bdrv_dec_in_flight(bs); 3587 3588 return ret; 3589 } 3590 3591 int coroutine_fn 3592 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) 3593 { 3594 BlockDriver *drv = bs->drv; 3595 int ret; 3596 IO_CODE(); 3597 assert_bdrv_graph_readable(); 3598 3599 if (!drv) { 3600 return -ENOMEDIUM; 3601 } 3602 3603 if
(!drv->bdrv_co_pdiscard_snapshot) { 3604 return -ENOTSUP; 3605 } 3606 3607 bdrv_inc_in_flight(bs); 3608 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); 3609 bdrv_dec_in_flight(bs); 3610 3611 return ret; 3612 } 3613