/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs);

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void GRAPH_RDLOCK
bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void GRAPH_RDLOCK
bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool GRAPH_RDLOCK
bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                         bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}
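
/*
 * Illustration (hypothetical parent, not part of this file): a
 * BdrvChildClass that takes part in the drain protocol above. The
 * callback field names are the real BdrvChildClass hooks invoked here;
 * the MyParent type and its fields are made up for the sketch.
 *
 *     static void my_drained_begin(BdrvChild *c)
 *     {
 *         MyParent *p = c->opaque;
 *         p->quiesced = true;              // stop issuing new I/O
 *     }
 *
 *     static bool my_drained_poll(BdrvChild *c)
 *     {
 *         MyParent *p = c->opaque;
 *         return p->in_flight > 0;         // true while still busy
 *     }
 *
 *     static void my_drained_end(BdrvChild *c)
 *     {
 *         MyParent *p = c->opaque;
 *         p->quiesced = false;             // resume submitting I/O
 *     }
 *
 *     static const BdrvChildClass my_child_class = {
 *         .drained_begin = my_drained_begin,
 *         .drained_poll  = my_drained_poll,
 *         .drained_end   = my_drained_end,
 *     };
 */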

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        /* called with rdlock taken, but it doesn't really need it. */
        c->klass->drained_begin(c);
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
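
/*
 * Worked example (hypothetical numbers) for the limit merging used by
 * bdrv_refresh_limits() above. max_transfer uses MIN_NON_ZERO, so an
 * unset (0, i.e. unlimited) child value never masks a real limit,
 * while opt_transfer takes the MAX:
 *
 *     child A: max_transfer = 0 (unset), opt_transfer = 64 KiB
 *     child B: max_transfer = 1 MiB,     opt_transfer = 128 KiB
 *     merged:  max_transfer = 1 MiB,     opt_transfer = 128 KiB
 */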

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it again.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    GLOBAL_STATE_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
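
/*
 * Flow of the BH handoff above (illustration):
 *
 *     coroutine                          main-loop AioContext
 *     ---------                          --------------------
 *     bdrv_co_yield_to_drain()
 *       schedule bdrv_co_drain_bh_cb --> bdrv_co_drain_bh_cb()
 *       qemu_coroutine_yield()             bdrv_do_drained_begin/end()
 *                                          data->done = true
 *       (resumes)                   <----  aio_co_wake(co)
 *       assert(data.done)
 */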

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    GLOBAL_STATE_CODE();

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void coroutine_mixed_fn
bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }

    /* At this point, we should be always running in the main loop. */
    GLOBAL_STATE_CODE();
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
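
/*
 * Usage sketch for the drained section above: quiesce one node around
 * a state change that must not race with I/O. bdrv_drained_begin()
 * returns only once in-flight requests have completed and parents are
 * quiesced:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify bs while no requests are in flight ...
 *     bdrv_drained_end(bs);
 */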

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /*
     * bdrv_drain_poll() can't make changes to the graph and we hold the BQL,
     * so iterating bdrv_next_all_states() is safe.
     */
    while ((bs = bdrv_next_all_states(bs))) {
        result |= bdrv_drain_poll(bs, NULL, true);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay, so waiting for the
     * I/O requests to finish may never terminate
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_do_drained_begin(bs, NULL, false);
    }
}

void coroutine_mixed_fn bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay, so waiting for the
     * I/O requests to finish may never terminate
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay, so waiting for the
     * I/O requests to finish may never terminate
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_do_drained_end(bs, NULL);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
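
/*
 * Usage sketch for the graph-wide drain above, e.g. around VM stop.
 * As the doc comment notes, draining does not flush, so a caller that
 * needs data on disk combines both (one possible ordering):
 *
 *     bdrv_drain_all_begin();
 *     ... graph-wide state change, no new requests can be submitted ...
 *     bdrv_drain_all_end();
 *     ret = bdrv_flush_all();    // draining alone does not flush
 */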

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_mutex_unlock(&req->bs->reqs_lock);

    /*
     * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
     * anymore because the request has been removed from the list, so it's safe
     * to restart the queue outside reqs_lock to minimize the critical section.
     */
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
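
/*
 * Worked example (hypothetical numbers) for the rounding above: with
 * req->offset = 70 KiB, req->bytes = 4 KiB and align = 64 KiB,
 * overlap_offset is rounded down to 64 KiB and overlap_bytes up to
 * 64 KiB (covering 64 KiB..128 KiB), so any request touching that
 * cluster is serialised against this one.
 */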

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to subcluster (if supported) or cluster boundaries
 */
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          int64_t *align_offset, int64_t *align_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
        *align_offset = offset;
        *align_bytes = bytes;
    } else {
        int64_t c = bdi.subcluster_size;
        *align_offset = QEMU_ALIGN_DOWN(offset, c);
        *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
    }
}

static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_mutex_unlock(&req->bs->reqs_lock);
}
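
/*
 * Pattern sketch (caller side, simplified): a request that must not
 * run concurrently with overlapping requests marks itself serialising
 * after being tracked; bdrv_make_request_serialising() then blocks
 * until conflicting tracked requests have drained:
 *
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
 *     bdrv_make_request_serialising(&req, bdrv_get_cluster_size(bs));
 *     ... perform the I/O ...
 *     tracked_request_end(&req);
 */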

int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}
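
/*
 * Example behaviour of the validation helpers above (hypothetical
 * values): a negative offset fails with -EIO ("offset is negative"),
 * and with a 4 KiB qiov and qiov_offset = 1 KiB, bytes may be at most
 * 3 KiB; anything larger fails the final qiov size check.
 */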

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
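
/*
 * The CoroutineIOCompletion pattern used above bridges the
 * callback-style BlockAIOCB driver interface into a synchronous-looking
 * coroutine call: submit with bdrv_co_io_em_complete as the completion
 * callback, yield, and read co.ret once the callback has woken the
 * coroutine with aio_co_wake().
 */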

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
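
/*
 * Dispatch order used by the driver read/write paths above, newest
 * interface first: .bdrv_co_pwritev_part, then .bdrv_co_pwritev, then
 * .bdrv_aio_pwritev, and finally the legacy sector-based
 * .bdrv_co_writev (and analogously for reads). FUA is emulated with a
 * trailing bdrv_co_flush() whenever the driver does not support
 * BDRV_REQ_FUA natively.
 */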

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t align_offset;
    int64_t align_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating a cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
    skip_bytes = offset - align_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   align_offset, align_bytes);

    while (align_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(align_bytes, max_transfer);
        } else {
            ret = bdrv_co_is_allocated(bs, align_offset,
                                       MIN(align_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(align_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, align_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, align_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, align_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        align_offset += pnum;
        align_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
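
/*
 * Shape of the copy-on-read loop above (summary): each
 * subcluster-aligned chunk that is still unallocated is read into the
 * bounce buffer, written back into @bs (as zeroes when the data is all
 * zero), and only then copied into the caller's qiov, so concurrent
 * scribbling over the guest buffer can never reach the image file;
 * already-allocated chunks are read directly into the destination.
 */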

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_co_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
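
/*
 * Worked example for the zero-after-EOF handling above (hypothetical
 * numbers, assuming max_transfer is larger): with total_bytes =
 * 100 KiB, offset = 96 KiB, bytes = 64 KiB and align = 4 KiB,
 * max_bytes is 4 KiB, so the first 4 KiB come from the driver and the
 * remaining 60 KiB are memset to zero in the qiov.
 */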

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests, i.e. when
 * @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 *
 * @write is true for write requests, false for read requests.
 *
 * If padding makes the vector too long (exceeding IOV_MAX), then we need to
 * merge existing vector elements into a single one. @collapse_bounce_buf acts
 * as the bounce buffer in such cases. @pre_collapse_qiov has the pre-collapse
 * I/O vector elements so for read requests, the data can be copied back after
 * the read is done.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    bool write;
    QEMUIOVector local_qiov;

    uint8_t *collapse_bounce_buf;
    size_t collapse_len;
    QEMUIOVector pre_collapse_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              bool write,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    pad->write = write;

    return true;
}
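
/*
 * Worked example for bdrv_init_padding() above (hypothetical numbers):
 * with align = 4096, offset = 5000 and bytes = 3000, head = 904 and
 * tail = 192. sum = 904 + 3000 + 192 = 4096, so buf_len stays one
 * align chunk, merge_reads is true, and both paddings share the single
 * buffer (tail_buf == buf).
 */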

static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
                      BdrvRequestPadding *pad, bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}
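
/*
 * Illustration of the RMW read above: before a padded write, the
 * align-sized chunks containing the unaligned head and tail are read
 * into @pad's buffers (a single read when merge_reads is set), so the
 * bytes surrounding the caller's data survive the full-width write;
 * zero_middle additionally clears the middle for zero-write requests.
 */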

/**
 * Free *pad's associated buffers, and perform any necessary finalization steps.
 */
static void bdrv_padding_finalize(BdrvRequestPadding *pad)
{
    if (pad->collapse_bounce_buf) {
        if (!pad->write) {
            /*
             * If padding required elements in the vector to be collapsed into a
             * bounce buffer, copy the bounce buffer content back
             */
            qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
                                pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_vfree(pad->collapse_bounce_buf);
        qemu_iovec_destroy(&pad->pre_collapse_qiov);
    }
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
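
/*
 * Lifecycle sketch of a padded write (caller side, simplified from the
 * write path in this file):
 *
 *     bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
 *                      &pad, &padded, &flags);
 *     bdrv_make_request_serialising(&req, align);
 *     bdrv_padding_rmw_read(child, &req, &pad, false);
 *     ... aligned write of the padded request ...
 *     bdrv_padding_finalize(&pad);
 */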

/*
 * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
 * ensuring that the resulting vector will not exceed IOV_MAX elements.
 *
 * To ensure this, when necessary, the first two or three elements of @iov are
 * merged into pad->collapse_bounce_buf and replaced by a reference to that
 * bounce buffer in pad->local_qiov.
 *
 * After performing a read request, the data from the bounce buffer must be
 * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
 */
static int bdrv_create_padded_qiov(BlockDriverState *bs,
                                   BdrvRequestPadding *pad,
                                   struct iovec *iov, int niov,
                                   size_t iov_offset, size_t bytes)
{
    int padded_niov, surplus_count, collapse_count;

    /* Assert this invariant */
    assert(niov <= IOV_MAX);

    /*
     * Cannot pad if resulting length would exceed SIZE_MAX. Returning an error
     * to the guest is not ideal, but there is little else we can do. At least
     * this will practically never happen on 64-bit systems.
     */
    if (SIZE_MAX - pad->head < bytes ||
        SIZE_MAX - pad->head - bytes < pad->tail)
    {
        return -EINVAL;
    }

    /* Length of the resulting IOV if we just concatenated everything */
    padded_niov = !!pad->head + niov + !!pad->tail;

    qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));

    if (pad->head) {
        qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
    }

    /*
     * If padded_niov > IOV_MAX, we cannot just concatenate everything.
     * Instead, merge the first two or three elements of @iov to reduce the
     * number of vector elements as necessary.
     */
    if (padded_niov > IOV_MAX) {
        /*
         * Only head and tail can lead to the number of entries exceeding
         * IOV_MAX, so we can exceed it by the head and tail at most. We need
         * to reduce the number of elements by `surplus_count`, so we merge that
         * many elements plus one into one element.
         */
        surplus_count = padded_niov - IOV_MAX;
        assert(surplus_count <= !!pad->head + !!pad->tail);
        collapse_count = surplus_count + 1;

        /*
         * Move the elements to collapse into `pad->pre_collapse_qiov`, then
         * advance `iov` (and associated variables) by those elements.
         */
        qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
        qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
                              collapse_count, iov_offset, SIZE_MAX);
        iov += collapse_count;
        iov_offset = 0;
        niov -= collapse_count;
        bytes -= pad->pre_collapse_qiov.size;

        /*
         * Construct the bounce buffer to match the length of the to-collapse
         * vector elements, and for write requests, initialize it with the data
         * from those elements. Then add it to `pad->local_qiov`.
         */
        pad->collapse_len = pad->pre_collapse_qiov.size;
        pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
        if (pad->write) {
            qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
                              pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_iovec_add(&pad->local_qiov,
                       pad->collapse_bounce_buf, pad->collapse_len);
    }

    qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);

    if (pad->tail) {
        qemu_iovec_add(&pad->local_qiov,
                       pad->buf + pad->buf_len - pad->tail, pad->tail);
    }

    assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
    return 0;
}
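
/*
 * Worked example for the collapse logic above (hypothetical numbers):
 * with niov = IOV_MAX and both head and tail present, padded_niov is
 * IOV_MAX + 2, so surplus_count = 2 and the first three guest iovecs
 * are merged into collapse_bounce_buf, bringing the total back to
 * exactly IOV_MAX entries.
 */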

/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * @write is true for write requests, false for read requests.
 *
 * Request parameters (@qiov, @qiov_offset, @offset, @bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            bool write,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;
    struct iovec *sliced_iov;
    int sliced_niov;
    size_t sliced_head, sliced_tail;

    /* Should have been checked by the caller already */
    ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    /*
     * For prefetching in stream_populate(), no qiov is passed along, because
     * only copy-on-read matters.
     */
    if (*qiov) {
        sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
                                      &sliced_head, &sliced_tail,
                                      &sliced_niov);

        /* Guaranteed by bdrv_check_request32() */
        assert(*bytes <= SIZE_MAX);
        ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
                                      sliced_head, *bytes);
        if (ret < 0) {
            bdrv_padding_finalize(pad);
            return ret;
        }
        *qiov = &pad->local_qiov;
        *qiov_offset = 0;
    }

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}
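
/*
 * Usage sketch for the public read entry points below: reading 4 KiB
 * at offset 0 through a BdrvChild from coroutine context with the
 * graph rdlock held (error handling omitted):
 *
 *     QEMUIOVector qiov;
 *     char buf[4096];
 *
 *     qemu_iovec_init_buf(&qiov, buf, sizeof(buf));
 *     ret = bdrv_co_preadv(child, 0, sizeof(buf), &qiov, 0);
 */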

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver
         * assigns special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass it to the driver
         * due to request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
                           &pad, NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_finalize(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}
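
/*
 * Worked example for the alignment loop below (hypothetical numbers,
 * assuming a sufficiently large max_transfer): zeroing offset = 1 KiB,
 * bytes = 130 KiB with alignment = 64 KiB is split into an unaligned
 * 63 KiB head (1 KiB..64 KiB), one 64 KiB aligned chunk, and a 3 KiB
 * tail, so the driver only ever sees one unaligned fragment on each
 * end of the request.
 */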

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                         BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwritev() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}

static inline int coroutine_fn GRAPH_RDLOCK
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
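
/*
 * Note on the flag pairing checked in bdrv_co_write_req_prepare()
 * above: BDRV_REQ_NO_WAIT is only valid together with
 * BDRV_REQ_SERIALISING; with both set, the request fails with -EBUSY
 * instead of waiting when a conflicting request is already in flight.
 */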
end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 2014 BlockDriverState *bs = child->bs; 2015 2016 bdrv_check_request(offset, bytes, &error_abort); 2017 2018 qatomic_inc(&bs->write_gen); 2019 2020 /* 2021 * Discard cannot extend the image, but in error handling cases, such as 2022 * when reverting a qcow2 cluster allocation, the discarded range can pass 2023 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD 2024 * here. Instead, just skip it, since semantically a discard request 2025 * beyond EOF cannot expand the image anyway. 2026 */ 2027 if (ret == 0 && 2028 (req->type == BDRV_TRACKED_TRUNCATE || 2029 end_sector > bs->total_sectors) && 2030 req->type != BDRV_TRACKED_DISCARD) { 2031 bs->total_sectors = end_sector; 2032 bdrv_parent_cb_resize(bs); 2033 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); 2034 } 2035 if (req->bytes) { 2036 switch (req->type) { 2037 case BDRV_TRACKED_WRITE: 2038 stat64_max(&bs->wr_highest_offset, offset + bytes); 2039 /* fall through, to set dirty bits */ 2040 case BDRV_TRACKED_DISCARD: 2041 bdrv_set_dirty(bs, offset, bytes); 2042 break; 2043 default: 2044 break; 2045 } 2046 } 2047 } 2048 2049 /* 2050 * Forwards an already correctly aligned write request to the BlockDriver, 2051 * after possibly fragmenting it. 2052 */ 2053 static int coroutine_fn GRAPH_RDLOCK 2054 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req, 2055 int64_t offset, int64_t bytes, int64_t align, 2056 QEMUIOVector *qiov, size_t qiov_offset, 2057 BdrvRequestFlags flags) 2058 { 2059 BlockDriverState *bs = child->bs; 2060 BlockDriver *drv = bs->drv; 2061 int ret; 2062 2063 int64_t bytes_remaining = bytes; 2064 int max_transfer; 2065 2066 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort); 2067 2068 if (!drv) { 2069 return -ENOMEDIUM; 2070 } 2071 2072 if (bdrv_has_readonly_bitmaps(bs)) { 2073 return -EPERM; 2074 } 2075 2076 assert(is_power_of_2(align)); 2077 assert((offset & (align - 1)) == 0); 2078 assert((bytes & (align - 1)) == 0); 2079 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 2080 align); 2081 2082 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 2083 2084 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 2085 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 2086 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { 2087 flags |= BDRV_REQ_ZERO_WRITE; 2088 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 2089 flags |= BDRV_REQ_MAY_UNMAP; 2090 } 2091 2092 /* Can't use optimization hint with bufferless zero write */ 2093 flags &= ~BDRV_REQ_REGISTERED_BUF; 2094 } 2095 2096 if (ret < 0) { 2097 /* Do nothing, write notifier decided to fail this request */ 2098 } else if (flags & BDRV_REQ_ZERO_WRITE) { 2099 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO); 2100 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 2101 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 2102 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, 2103 qiov, qiov_offset); 2104 } else if (bytes <= max_transfer) { 2105 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 2106 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); 2107 } else { 2108 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); 2109 while (bytes_remaining) { 2110 int num = MIN(bytes_remaining, max_transfer); 2111 int local_flags = flags; 2112 2113 assert(num); 2114 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 2115 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 2116 /* If 
FUA is going to be emulated by flush, we only 2117 * need to flush on the last iteration */ 2118 local_flags &= ~BDRV_REQ_FUA; 2119 } 2120 2121 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 2122 num, qiov, 2123 qiov_offset + bytes - bytes_remaining, 2124 local_flags); 2125 if (ret < 0) { 2126 break; 2127 } 2128 bytes_remaining -= num; 2129 } 2130 } 2131 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE); 2132 2133 if (ret >= 0) { 2134 ret = 0; 2135 } 2136 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 2137 2138 return ret; 2139 } 2140 2141 static int coroutine_fn GRAPH_RDLOCK 2142 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes, 2143 BdrvRequestFlags flags, BdrvTrackedRequest *req) 2144 { 2145 BlockDriverState *bs = child->bs; 2146 QEMUIOVector local_qiov; 2147 uint64_t align = bs->bl.request_alignment; 2148 int ret = 0; 2149 bool padding; 2150 BdrvRequestPadding pad; 2151 2152 /* This flag doesn't make sense for padding or zero writes */ 2153 flags &= ~BDRV_REQ_REGISTERED_BUF; 2154 2155 padding = bdrv_init_padding(bs, offset, bytes, true, &pad); 2156 if (padding) { 2157 assert(!(flags & BDRV_REQ_NO_WAIT)); 2158 bdrv_make_request_serialising(req, align); 2159 2160 bdrv_padding_rmw_read(child, req, &pad, true); 2161 2162 if (pad.head || pad.merge_reads) { 2163 int64_t aligned_offset = offset & ~(align - 1); 2164 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; 2165 2166 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 2167 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 2168 align, &local_qiov, 0, 2169 flags & ~BDRV_REQ_ZERO_WRITE); 2170 if (ret < 0 || pad.merge_reads) { 2171 /* Error or all work is done */ 2172 goto out; 2173 } 2174 offset += write_bytes - pad.head; 2175 bytes -= write_bytes - pad.head; 2176 } 2177 } 2178 2179 assert(!bytes || (offset & (align - 1)) == 0); 2180 if (bytes >= align) { 2181 /* Write the aligned part in the middle. 
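* As a hypothetical example (assuming align == 512): offset = 1024 and
* bytes = 1300 yield aligned_bytes = 1024 here, leaving the 276-byte
* tail for the padded write further down.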
*/
2182 int64_t aligned_bytes = bytes & ~(align - 1);
2183 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2184 NULL, 0, flags);
2185 if (ret < 0) {
2186 goto out;
2187 }
2188 bytes -= aligned_bytes;
2189 offset += aligned_bytes;
2190 }
2191
2192 assert(!bytes || (offset & (align - 1)) == 0);
2193 if (bytes) {
2194 assert(align == pad.tail + bytes);
2195
2196 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2197 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2198 &local_qiov, 0,
2199 flags & ~BDRV_REQ_ZERO_WRITE);
2200 }
2201
2202 out:
2203 bdrv_padding_finalize(&pad);
2204
2205 return ret;
2206 }
2207
2208 /*
2209 * Handle a write request in coroutine context
2210 */
2211 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2212 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2213 BdrvRequestFlags flags)
2214 {
2215 IO_CODE();
2216 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2217 }
2218
2219 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2220 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2221 BdrvRequestFlags flags)
2222 {
2223 BlockDriverState *bs = child->bs;
2224 BdrvTrackedRequest req;
2225 uint64_t align = bs->bl.request_alignment;
2226 BdrvRequestPadding pad;
2227 int ret;
2228 bool padded = false;
2229 IO_CODE();
2230
2231 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2232
2233 if (!bdrv_co_is_inserted(bs)) {
2234 return -ENOMEDIUM;
2235 }
2236
2237 if (flags & BDRV_REQ_ZERO_WRITE) {
2238 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
2239 } else {
2240 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2241 }
2242 if (ret < 0) {
2243 return ret;
2244 }
2245
2246 /* If the request is misaligned then we can't make it efficient */
2247 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2248 !QEMU_IS_ALIGNED(offset | bytes, align))
2249 {
2250 return -ENOTSUP;
2251 }
2252
2253 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2254 /*
2255 * Aligning a zero-length request makes no sense. Even if the driver
2256 * assigns special meaning to zero-length requests (as
2257 * qcow2_co_pwritev_compressed_part does), we cannot pass such a
2258 * request down to the driver because of request_alignment.
2259 *
2260 * Still, there is no reason to return an error if someone does an unaligned zero-length write occasionally.
2261 */
2262 return 0;
2263 }
2264
2265 if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2266 /*
2267 * Pad request for following read-modify-write cycle.
2268 * bdrv_co_do_zero_pwritev() does the aligning by itself, so we do
2269 * the alignment only if there is no ZERO flag.
2270 */
2271 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
2272 &pad, &padded, &flags);
2273 if (ret < 0) {
2274 return ret;
2275 }
2276 }
2277
2278 bdrv_inc_in_flight(bs);
2279 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2280
2281 if (flags & BDRV_REQ_ZERO_WRITE) {
2282 assert(!padded);
2283 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2284 goto out;
2285 }
2286
2287 if (padded) {
2288 /*
2289 * Request was unaligned to request_alignment and therefore
2290 * padded. We are going to do read-modify-write, and must
2291 * serialize the request to prevent interactions of the
2292 * widened region with other transactions.
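*
* As a hypothetical example (assuming request_alignment == 4096): a
* 512-byte write at offset 4094 is widened to the aligned region
* [0, 8192), and other requests touching that region must wait.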
2293 */
2294 assert(!(flags & BDRV_REQ_NO_WAIT));
2295 bdrv_make_request_serialising(&req, align);
2296 bdrv_padding_rmw_read(child, &req, &pad, false);
2297 }
2298
2299 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2300 qiov, qiov_offset, flags);
2301
2302 bdrv_padding_finalize(&pad);
2303
2304 out:
2305 tracked_request_end(&req);
2306 bdrv_dec_in_flight(bs);
2307
2308 return ret;
2309 }
2310
2311 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2312 int64_t bytes, BdrvRequestFlags flags)
2313 {
2314 IO_CODE();
2315 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2316 assert_bdrv_graph_readable();
2317
2318 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2319 flags &= ~BDRV_REQ_MAY_UNMAP;
2320 }
2321
2322 return bdrv_co_pwritev(child, offset, bytes, NULL,
2323 BDRV_REQ_ZERO_WRITE | flags);
2324 }
2325
2326 /*
2327 * Flush ALL BDSes, regardless of whether they are reachable via a BlockBackend or not.
2328 */
2329 int bdrv_flush_all(void)
2330 {
2331 BdrvNextIterator it;
2332 BlockDriverState *bs = NULL;
2333 int result = 0;
2334
2335 GLOBAL_STATE_CODE();
2336 GRAPH_RDLOCK_GUARD_MAINLOOP();
2337
2338 /*
2339 * The bdrv queue is managed by record/replay;
2340 * creating a new flush request for stopping
2341 * the VM may break determinism.
2342 */
2343 if (replay_events_enabled()) {
2344 return result;
2345 }
2346
2347 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2348 int ret = bdrv_flush(bs);
2349 if (ret < 0 && !result) {
2350 result = ret;
2351 }
2352 }
2353
2354 return result;
2355 }
2356
2357 /*
2358 * Returns the allocation status of the specified sectors.
2359 * Drivers not implementing the functionality are assumed to not support
2360 * backing files, hence all their sectors are reported as allocated.
2361 *
2362 * If 'want_zero' is true, the caller is querying for mapping
2363 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2364 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2365 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2366 *
2367 * If 'offset' is beyond the end of the disk image the return value is
2368 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2369 *
2370 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2371 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2372 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2373 *
2374 * 'pnum' is set to the number of bytes (including and immediately
2375 * following the specified offset) that are easily known to be in the
2376 * same allocated/unallocated state. Note that a second call starting
2377 * at the original offset plus returned pnum may have the same status.
2378 * The returned value is non-zero on success except at end-of-file.
2379 *
2380 * Returns negative errno on failure. Otherwise, if the
2381 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2382 * set to the host mapping and BDS corresponding to the guest offset.
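*
* A minimal, illustrative caller loop (coroutine context; the local
* variable names here are ours, not part of the API). After each call,
* ret holds BDRV_BLOCK_* flags for [offset, offset + pnum):
*
*     int64_t offset = 0, pnum = 0;
*     while (offset < total_size) {
*         int ret = bdrv_co_block_status(bs, offset, total_size - offset,
*                                        &pnum, NULL, NULL);
*         if (ret < 0) {
*             break;
*         }
*         offset += pnum;
*     }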
2383 */ 2384 static int coroutine_fn GRAPH_RDLOCK 2385 bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, 2386 int64_t offset, int64_t bytes, 2387 int64_t *pnum, int64_t *map, BlockDriverState **file) 2388 { 2389 int64_t total_size; 2390 int64_t n; /* bytes */ 2391 int ret; 2392 int64_t local_map = 0; 2393 BlockDriverState *local_file = NULL; 2394 int64_t aligned_offset, aligned_bytes; 2395 uint32_t align; 2396 bool has_filtered_child; 2397 2398 assert(pnum); 2399 assert_bdrv_graph_readable(); 2400 *pnum = 0; 2401 total_size = bdrv_co_getlength(bs); 2402 if (total_size < 0) { 2403 ret = total_size; 2404 goto early_out; 2405 } 2406 2407 if (offset >= total_size) { 2408 ret = BDRV_BLOCK_EOF; 2409 goto early_out; 2410 } 2411 if (!bytes) { 2412 ret = 0; 2413 goto early_out; 2414 } 2415 2416 n = total_size - offset; 2417 if (n < bytes) { 2418 bytes = n; 2419 } 2420 2421 /* Must be non-NULL or bdrv_co_getlength() would have failed */ 2422 assert(bs->drv); 2423 has_filtered_child = bdrv_filter_child(bs); 2424 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2425 *pnum = bytes; 2426 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2427 if (offset + bytes == total_size) { 2428 ret |= BDRV_BLOCK_EOF; 2429 } 2430 if (bs->drv->protocol_name) { 2431 ret |= BDRV_BLOCK_OFFSET_VALID; 2432 local_map = offset; 2433 local_file = bs; 2434 } 2435 goto early_out; 2436 } 2437 2438 bdrv_inc_in_flight(bs); 2439 2440 /* Round out to request_alignment boundaries */ 2441 align = bs->bl.request_alignment; 2442 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2443 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2444 2445 if (bs->drv->bdrv_co_block_status) { 2446 /* 2447 * Use the block-status cache only for protocol nodes: Format 2448 * drivers are generally quick to inquire the status, but protocol 2449 * drivers often need to get information from outside of qemu, so 2450 * we do not have control over the actual implementation. There 2451 * have been cases where inquiring the status took an unreasonably 2452 * long time, and we can do nothing in qemu to fix it. 2453 * This is especially problematic for images with large data areas, 2454 * because finding the few holes in them and giving them special 2455 * treatment does not gain much performance. Therefore, we try to 2456 * cache the last-identified data region. 2457 * 2458 * Second, limiting ourselves to protocol nodes allows us to assume 2459 * the block status for data regions to be DATA | OFFSET_VALID, and 2460 * that the host offset is the same as the guest offset. 2461 * 2462 * Note that it is possible that external writers zero parts of 2463 * the cached regions without the cache being invalidated, and so 2464 * we may report zeroes as data. This is not catastrophic, 2465 * however, because reporting zeroes as data is fine. 2466 */ 2467 if (QLIST_EMPTY(&bs->children) && 2468 bdrv_bsc_is_data(bs, aligned_offset, pnum)) 2469 { 2470 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 2471 local_file = bs; 2472 local_map = aligned_offset; 2473 } else { 2474 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2475 aligned_bytes, pnum, &local_map, 2476 &local_file); 2477 2478 /* 2479 * Note that checking QLIST_EMPTY(&bs->children) is also done when 2480 * the cache is queried above. Technically, we do not need to check 2481 * it here; the worst that can happen is that we fill the cache for 2482 * non-protocol nodes, and then it is never used. 
However, filling
2483 * the cache requires an RCU update, so double check here to avoid
2484 * such an update if possible.
2485 *
2486 * Check want_zero, because we only want to update the cache when we
2487 * have accurate information about what is zero and what is data.
2488 */
2489 if (want_zero &&
2490 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
2491 QLIST_EMPTY(&bs->children))
2492 {
2493 /*
2494 * When a protocol driver reports BLOCK_OFFSET_VALID, the
2495 * returned local_map value must be the same as the offset we
2496 * have passed (aligned_offset), and local_file must be the node
2497 * itself.
2498 * Assert this, because we follow this rule when reading from
2499 * the cache (see the `local_file = bs` and
2500 * `local_map = aligned_offset` assignments above), and the
2501 * result the cache delivers must be the same as the driver
2502 * would deliver.
2503 */
2504 assert(local_file == bs);
2505 assert(local_map == aligned_offset);
2506 bdrv_bsc_fill(bs, aligned_offset, *pnum);
2507 }
2508 }
2509 } else {
2510 /* Default code for filters */
2511
2512 local_file = bdrv_filter_bs(bs);
2513 assert(local_file);
2514
2515 *pnum = aligned_bytes;
2516 local_map = aligned_offset;
2517 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2518 }
2519 if (ret < 0) {
2520 *pnum = 0;
2521 goto out;
2522 }
2523
2524 /*
2525 * The driver's result must be a non-zero multiple of request_alignment.
2526 * Clamp pnum and adjust map to original request.
2527 */
2528 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2529 align > offset - aligned_offset);
2530 if (ret & BDRV_BLOCK_RECURSE) {
2531 assert(ret & BDRV_BLOCK_DATA);
2532 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2533 assert(!(ret & BDRV_BLOCK_ZERO));
2534 }
2535
2536 *pnum -= offset - aligned_offset;
2537 if (*pnum > bytes) {
2538 *pnum = bytes;
2539 }
2540 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2541 local_map += offset - aligned_offset;
2542 }
2543
2544 if (ret & BDRV_BLOCK_RAW) {
2545 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2546 ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
2547 *pnum, pnum, &local_map, &local_file);
2548 goto out;
2549 }
2550
2551 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2552 ret |= BDRV_BLOCK_ALLOCATED;
2553 } else if (bs->drv->supports_backing) {
2554 BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2555
2556 if (!cow_bs) {
2557 ret |= BDRV_BLOCK_ZERO;
2558 } else if (want_zero) {
2559 int64_t size2 = bdrv_co_getlength(cow_bs);
2560
2561 if (size2 >= 0 && offset >= size2) {
2562 ret |= BDRV_BLOCK_ZERO;
2563 }
2564 }
2565 }
2566
2567 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2568 local_file && local_file != bs &&
2569 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2570 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2571 int64_t file_pnum;
2572 int ret2;
2573
2574 ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
2575 *pnum, &file_pnum, NULL, NULL);
2576 if (ret2 >= 0) {
2577 /* Ignore errors. This is just providing extra information; it
2578 * is useful but not necessary.
2579 */
2580 if (ret2 & BDRV_BLOCK_EOF &&
2581 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2582 /*
2583 * It is valid for the format block driver to read
2584 * beyond the end of the underlying file's current
2585 * size; such areas read as zero.
2586 */
2587 ret |= BDRV_BLOCK_ZERO;
2588 } else {
2589 /* Limit request to the range reported by the protocol driver */
2590 *pnum = file_pnum;
2591 ret |= (ret2 & BDRV_BLOCK_ZERO);
2592 }
2593 }
2594
2595 /*
2596 * Now that the recursive search was done, clear the flag. Otherwise,
2597 * with more complicated block graphs like snapshot-access ->
2598 * copy-before-write -> qcow2, where the return value will be propagated
2599 * further up to a parent bdrv_co_do_block_status() call, both the
2600 * BDRV_BLOCK_RECURSE and BDRV_BLOCK_ZERO flags would be set, which is
2601 * not allowed.
2602 */
2603 ret &= ~BDRV_BLOCK_RECURSE;
2604 }
2605
2606 out:
2607 bdrv_dec_in_flight(bs);
2608 if (ret >= 0 && offset + *pnum == total_size) {
2609 ret |= BDRV_BLOCK_EOF;
2610 }
2611 early_out:
2612 if (file) {
2613 *file = local_file;
2614 }
2615 if (map) {
2616 *map = local_map;
2617 }
2618 return ret;
2619 }
2620
2621 int coroutine_fn
2622 bdrv_co_common_block_status_above(BlockDriverState *bs,
2623 BlockDriverState *base,
2624 bool include_base,
2625 bool want_zero,
2626 int64_t offset,
2627 int64_t bytes,
2628 int64_t *pnum,
2629 int64_t *map,
2630 BlockDriverState **file,
2631 int *depth)
2632 {
2633 int ret;
2634 BlockDriverState *p;
2635 int64_t eof = 0;
2636 int dummy;
2637 IO_CODE();
2638
2639 assert(!include_base || base); /* Can't include NULL base */
2640 assert_bdrv_graph_readable();
2641
2642 if (!depth) {
2643 depth = &dummy;
2644 }
2645 *depth = 0;
2646
2647 if (!include_base && bs == base) {
2648 *pnum = bytes;
2649 return 0;
2650 }
2651
2652 ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
2653 map, file);
2654 ++*depth;
2655 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2656 return ret;
2657 }
2658
2659 if (ret & BDRV_BLOCK_EOF) {
2660 eof = offset + *pnum;
2661 }
2662
2663 assert(*pnum <= bytes);
2664 bytes = *pnum;
2665
2666 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2667 p = bdrv_filter_or_cow_bs(p))
2668 {
2669 ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
2670 map, file);
2671 ++*depth;
2672 if (ret < 0) {
2673 return ret;
2674 }
2675 if (*pnum == 0) {
2676 /*
2677 * The top layer deferred to this layer, and because this layer is
2678 * short, any zeroes that we synthesize beyond EOF behave as if they
2679 * were allocated at this layer.
2680 *
2681 * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
2682 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2683 * below.
2684 */
2685 assert(ret & BDRV_BLOCK_EOF);
2686 *pnum = bytes;
2687 if (file) {
2688 *file = p;
2689 }
2690 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2691 break;
2692 }
2693 if (ret & BDRV_BLOCK_ALLOCATED) {
2694 /*
2695 * We've found the node and the status; we must break.
2696 *
2697 * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
2698 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2699 * below.
2700 */
2701 ret &= ~BDRV_BLOCK_EOF;
2702 break;
2703 }
2704
2705 if (p == base) {
2706 assert(include_base);
2707 break;
2708 }
2709
2710 /*
2711 * OK, the [offset, offset + *pnum) region is unallocated on this
2712 * layer, so let's continue diving down the backing chain.
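* (Each iteration steps from a node to its filtered or backing child,
* as returned by bdrv_filter_or_cow_bs(), until @base is reached.)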
2713 */ 2714 assert(*pnum <= bytes); 2715 bytes = *pnum; 2716 } 2717 2718 if (offset + *pnum == eof) { 2719 ret |= BDRV_BLOCK_EOF; 2720 } 2721 2722 return ret; 2723 } 2724 2725 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2726 BlockDriverState *base, 2727 int64_t offset, int64_t bytes, 2728 int64_t *pnum, int64_t *map, 2729 BlockDriverState **file) 2730 { 2731 IO_CODE(); 2732 return bdrv_co_common_block_status_above(bs, base, false, true, offset, 2733 bytes, pnum, map, file, NULL); 2734 } 2735 2736 int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset, 2737 int64_t bytes, int64_t *pnum, 2738 int64_t *map, BlockDriverState **file) 2739 { 2740 IO_CODE(); 2741 return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2742 offset, bytes, pnum, map, file); 2743 } 2744 2745 /* 2746 * Check @bs (and its backing chain) to see if the range defined 2747 * by @offset and @bytes is known to read as zeroes. 2748 * Return 1 if that is the case, 0 otherwise and -errno on error. 2749 * This test is meant to be fast rather than accurate so returning 0 2750 * does not guarantee non-zero data. 2751 */ 2752 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2753 int64_t bytes) 2754 { 2755 int ret; 2756 int64_t pnum = bytes; 2757 IO_CODE(); 2758 2759 if (!bytes) { 2760 return 1; 2761 } 2762 2763 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, 2764 bytes, &pnum, NULL, NULL, NULL); 2765 2766 if (ret < 0) { 2767 return ret; 2768 } 2769 2770 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2771 } 2772 2773 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, 2774 int64_t bytes, int64_t *pnum) 2775 { 2776 int ret; 2777 int64_t dummy; 2778 IO_CODE(); 2779 2780 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, 2781 bytes, pnum ? pnum : &dummy, NULL, 2782 NULL, NULL); 2783 if (ret < 0) { 2784 return ret; 2785 } 2786 return !!(ret & BDRV_BLOCK_ALLOCATED); 2787 } 2788 2789 /* 2790 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2791 * 2792 * Return a positive depth if (a prefix of) the given range is allocated 2793 * in any image between BASE and TOP (BASE is only included if include_base 2794 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. 2795 * BASE can be NULL to check if the given offset is allocated in any 2796 * image of the chain. Return 0 otherwise, or negative errno on 2797 * failure. 2798 * 2799 * 'pnum' is set to the number of bytes (including and immediately 2800 * following the specified offset) that are known to be in the same 2801 * allocated/unallocated state. Note that a subsequent call starting 2802 * at 'offset + *pnum' may return the same allocation status (in other 2803 * words, the result is not necessarily the maximum possible range); 2804 * but 'pnum' will only be 0 when end of file is reached. 
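*
* As an illustration, given the chain BASE <- INTER1 <- TOP, a range
* allocated only in INTER1 yields depth 2, while a range allocated
* nowhere in the chain yields 0.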
2805 */ 2806 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs, 2807 BlockDriverState *base, 2808 bool include_base, int64_t offset, 2809 int64_t bytes, int64_t *pnum) 2810 { 2811 int depth; 2812 int ret; 2813 IO_CODE(); 2814 2815 ret = bdrv_co_common_block_status_above(bs, base, include_base, false, 2816 offset, bytes, pnum, NULL, NULL, 2817 &depth); 2818 if (ret < 0) { 2819 return ret; 2820 } 2821 2822 if (ret & BDRV_BLOCK_ALLOCATED) { 2823 return depth; 2824 } 2825 return 0; 2826 } 2827 2828 int coroutine_fn 2829 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2830 { 2831 BlockDriver *drv = bs->drv; 2832 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2833 int ret; 2834 IO_CODE(); 2835 assert_bdrv_graph_readable(); 2836 2837 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2838 if (ret < 0) { 2839 return ret; 2840 } 2841 2842 if (!drv) { 2843 return -ENOMEDIUM; 2844 } 2845 2846 bdrv_inc_in_flight(bs); 2847 2848 if (drv->bdrv_co_load_vmstate) { 2849 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos); 2850 } else if (child_bs) { 2851 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2852 } else { 2853 ret = -ENOTSUP; 2854 } 2855 2856 bdrv_dec_in_flight(bs); 2857 2858 return ret; 2859 } 2860 2861 int coroutine_fn 2862 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2863 { 2864 BlockDriver *drv = bs->drv; 2865 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2866 int ret; 2867 IO_CODE(); 2868 assert_bdrv_graph_readable(); 2869 2870 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 2871 if (ret < 0) { 2872 return ret; 2873 } 2874 2875 if (!drv) { 2876 return -ENOMEDIUM; 2877 } 2878 2879 bdrv_inc_in_flight(bs); 2880 2881 if (drv->bdrv_co_save_vmstate) { 2882 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos); 2883 } else if (child_bs) { 2884 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2885 } else { 2886 ret = -ENOTSUP; 2887 } 2888 2889 bdrv_dec_in_flight(bs); 2890 2891 return ret; 2892 } 2893 2894 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2895 int64_t pos, int size) 2896 { 2897 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2898 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2899 IO_CODE(); 2900 2901 return ret < 0 ? ret : size; 2902 } 2903 2904 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2905 int64_t pos, int size) 2906 { 2907 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2908 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2909 IO_CODE(); 2910 2911 return ret < 0 ? ret : size; 2912 } 2913 2914 /**************************************************************/ 2915 /* async I/Os */ 2916 2917 /** 2918 * Synchronously cancels an acb. Must be called with the BQL held and the acb 2919 * must be processed with the BQL held too (IOThreads are not allowed). 2920 * 2921 * Use bdrv_aio_cancel_async() instead when possible. 2922 */ 2923 void bdrv_aio_cancel(BlockAIOCB *acb) 2924 { 2925 GLOBAL_STATE_CODE(); 2926 qemu_aio_ref(acb); 2927 bdrv_aio_cancel_async(acb); 2928 AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1); 2929 qemu_aio_unref(acb); 2930 } 2931 2932 /* Async version of aio cancel. The caller is not blocked if the acb implements 2933 * cancel_async, otherwise we do nothing and let the request normally complete. 2934 * In either case the completion callback must be called. 
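*
* Illustrative caller expectation: after bdrv_aio_cancel_async(acb),
* the request may still complete with its normal result; either way the
* completion callback is invoked exactly once, so callers must not tear
* down state the callback needs before it has run.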
*/ 2935 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2936 { 2937 IO_CODE(); 2938 if (acb->aiocb_info->cancel_async) { 2939 acb->aiocb_info->cancel_async(acb); 2940 } 2941 } 2942 2943 /**************************************************************/ 2944 /* Coroutine block device emulation */ 2945 2946 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2947 { 2948 BdrvChild *primary_child = bdrv_primary_child(bs); 2949 BdrvChild *child; 2950 int current_gen; 2951 int ret = 0; 2952 IO_CODE(); 2953 2954 assert_bdrv_graph_readable(); 2955 bdrv_inc_in_flight(bs); 2956 2957 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) || 2958 bdrv_is_sg(bs)) { 2959 goto early_exit; 2960 } 2961 2962 qemu_mutex_lock(&bs->reqs_lock); 2963 current_gen = qatomic_read(&bs->write_gen); 2964 2965 /* Wait until any previous flushes are completed */ 2966 while (bs->active_flush_req) { 2967 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2968 } 2969 2970 /* Flushes reach this point in nondecreasing current_gen order. */ 2971 bs->active_flush_req = true; 2972 qemu_mutex_unlock(&bs->reqs_lock); 2973 2974 /* Write back all layers by calling one driver function */ 2975 if (bs->drv->bdrv_co_flush) { 2976 ret = bs->drv->bdrv_co_flush(bs); 2977 goto out; 2978 } 2979 2980 /* Write back cached data to the OS even with cache=unsafe */ 2981 BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); 2982 if (bs->drv->bdrv_co_flush_to_os) { 2983 ret = bs->drv->bdrv_co_flush_to_os(bs); 2984 if (ret < 0) { 2985 goto out; 2986 } 2987 } 2988 2989 /* But don't actually force it to the disk with cache=unsafe */ 2990 if (bs->open_flags & BDRV_O_NO_FLUSH) { 2991 goto flush_children; 2992 } 2993 2994 /* Check if we really need to flush anything */ 2995 if (bs->flushed_gen == current_gen) { 2996 goto flush_children; 2997 } 2998 2999 BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); 3000 if (!bs->drv) { 3001 /* bs->drv->bdrv_co_flush() might have ejected the BDS 3002 * (even in case of apparent success) */ 3003 ret = -ENOMEDIUM; 3004 goto out; 3005 } 3006 if (bs->drv->bdrv_co_flush_to_disk) { 3007 ret = bs->drv->bdrv_co_flush_to_disk(bs); 3008 } else if (bs->drv->bdrv_aio_flush) { 3009 BlockAIOCB *acb; 3010 CoroutineIOCompletion co = { 3011 .coroutine = qemu_coroutine_self(), 3012 }; 3013 3014 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 3015 if (acb == NULL) { 3016 ret = -EIO; 3017 } else { 3018 qemu_coroutine_yield(); 3019 ret = co.ret; 3020 } 3021 } else { 3022 /* 3023 * Some block drivers always operate in either writethrough or unsafe 3024 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 3025 * know how the server works (because the behaviour is hardcoded or 3026 * depends on server-side configuration), so we can't ensure that 3027 * everything is safe on disk. Returning an error doesn't work because 3028 * that would break guests even if the server operates in writethrough 3029 * mode. 3030 * 3031 * Let's hope the user knows what he's doing. 3032 */ 3033 ret = 0; 3034 } 3035 3036 if (ret < 0) { 3037 goto out; 3038 } 3039 3040 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 3041 * in the case of cache=unsafe, so there are no useless flushes. 
3042 */ 3043 flush_children: 3044 ret = 0; 3045 QLIST_FOREACH(child, &bs->children, next) { 3046 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 3047 int this_child_ret = bdrv_co_flush(child->bs); 3048 if (!ret) { 3049 ret = this_child_ret; 3050 } 3051 } 3052 } 3053 3054 out: 3055 /* Notify any pending flushes that we have completed */ 3056 if (ret == 0) { 3057 bs->flushed_gen = current_gen; 3058 } 3059 3060 qemu_mutex_lock(&bs->reqs_lock); 3061 bs->active_flush_req = false; 3062 /* Return value is ignored - it's ok if wait queue is empty */ 3063 qemu_co_queue_next(&bs->flush_queue); 3064 qemu_mutex_unlock(&bs->reqs_lock); 3065 3066 early_exit: 3067 bdrv_dec_in_flight(bs); 3068 return ret; 3069 } 3070 3071 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 3072 int64_t bytes) 3073 { 3074 BdrvTrackedRequest req; 3075 int ret; 3076 int64_t max_pdiscard; 3077 int head, tail, align; 3078 BlockDriverState *bs = child->bs; 3079 IO_CODE(); 3080 assert_bdrv_graph_readable(); 3081 3082 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { 3083 return -ENOMEDIUM; 3084 } 3085 3086 if (bdrv_has_readonly_bitmaps(bs)) { 3087 return -EPERM; 3088 } 3089 3090 ret = bdrv_check_request(offset, bytes, NULL); 3091 if (ret < 0) { 3092 return ret; 3093 } 3094 3095 /* Do nothing if disabled. */ 3096 if (!(bs->open_flags & BDRV_O_UNMAP)) { 3097 return 0; 3098 } 3099 3100 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 3101 return 0; 3102 } 3103 3104 /* Invalidate the cached block-status data range if this discard overlaps */ 3105 bdrv_bsc_invalidate_range(bs, offset, bytes); 3106 3107 /* Discard is advisory, but some devices track and coalesce 3108 * unaligned requests, so we must pass everything down rather than 3109 * round here. Still, most devices will just silently ignore 3110 * unaligned requests (by returning -ENOTSUP), so we must fragment 3111 * the request accordingly. */ 3112 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 3113 assert(align % bs->bl.request_alignment == 0); 3114 head = offset % align; 3115 tail = (offset + bytes) % align; 3116 3117 bdrv_inc_in_flight(bs); 3118 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 3119 3120 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 3121 if (ret < 0) { 3122 goto out; 3123 } 3124 3125 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), 3126 align); 3127 assert(max_pdiscard >= bs->bl.request_alignment); 3128 3129 while (bytes > 0) { 3130 int64_t num = bytes; 3131 3132 if (head) { 3133 /* Make small requests to get to alignment boundaries. */ 3134 num = MIN(bytes, align - head); 3135 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 3136 num %= bs->bl.request_alignment; 3137 } 3138 head = (head + num) % align; 3139 assert(num < max_pdiscard); 3140 } else if (tail) { 3141 if (num > align) { 3142 /* Shorten the request to the last aligned cluster. 
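* As a hypothetical worked example (assuming align == 65536):
* offset = 0 and bytes = 200000 give tail = 3392, so this iteration
* discards the aligned 196608 bytes and leaves the 3392-byte remainder
* for the next pass.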
*/ 3143 num -= tail; 3144 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 3145 tail > bs->bl.request_alignment) { 3146 tail %= bs->bl.request_alignment; 3147 num -= tail; 3148 } 3149 } 3150 /* limit request size */ 3151 if (num > max_pdiscard) { 3152 num = max_pdiscard; 3153 } 3154 3155 if (!bs->drv) { 3156 ret = -ENOMEDIUM; 3157 goto out; 3158 } 3159 if (bs->drv->bdrv_co_pdiscard) { 3160 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 3161 } else { 3162 BlockAIOCB *acb; 3163 CoroutineIOCompletion co = { 3164 .coroutine = qemu_coroutine_self(), 3165 }; 3166 3167 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 3168 bdrv_co_io_em_complete, &co); 3169 if (acb == NULL) { 3170 ret = -EIO; 3171 goto out; 3172 } else { 3173 qemu_coroutine_yield(); 3174 ret = co.ret; 3175 } 3176 } 3177 if (ret && ret != -ENOTSUP) { 3178 goto out; 3179 } 3180 3181 offset += num; 3182 bytes -= num; 3183 } 3184 ret = 0; 3185 out: 3186 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 3187 tracked_request_end(&req); 3188 bdrv_dec_in_flight(bs); 3189 return ret; 3190 } 3191 3192 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 3193 { 3194 BlockDriver *drv = bs->drv; 3195 CoroutineIOCompletion co = { 3196 .coroutine = qemu_coroutine_self(), 3197 }; 3198 BlockAIOCB *acb; 3199 IO_CODE(); 3200 assert_bdrv_graph_readable(); 3201 3202 bdrv_inc_in_flight(bs); 3203 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 3204 co.ret = -ENOTSUP; 3205 goto out; 3206 } 3207 3208 if (drv->bdrv_co_ioctl) { 3209 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 3210 } else { 3211 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 3212 if (!acb) { 3213 co.ret = -ENOTSUP; 3214 goto out; 3215 } 3216 qemu_coroutine_yield(); 3217 } 3218 out: 3219 bdrv_dec_in_flight(bs); 3220 return co.ret; 3221 } 3222 3223 int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset, 3224 unsigned int *nr_zones, 3225 BlockZoneDescriptor *zones) 3226 { 3227 BlockDriver *drv = bs->drv; 3228 CoroutineIOCompletion co = { 3229 .coroutine = qemu_coroutine_self(), 3230 }; 3231 IO_CODE(); 3232 3233 bdrv_inc_in_flight(bs); 3234 if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) { 3235 co.ret = -ENOTSUP; 3236 goto out; 3237 } 3238 co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones); 3239 out: 3240 bdrv_dec_in_flight(bs); 3241 return co.ret; 3242 } 3243 3244 int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, 3245 int64_t offset, int64_t len) 3246 { 3247 BlockDriver *drv = bs->drv; 3248 CoroutineIOCompletion co = { 3249 .coroutine = qemu_coroutine_self(), 3250 }; 3251 IO_CODE(); 3252 3253 bdrv_inc_in_flight(bs); 3254 if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) { 3255 co.ret = -ENOTSUP; 3256 goto out; 3257 } 3258 co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len); 3259 out: 3260 bdrv_dec_in_flight(bs); 3261 return co.ret; 3262 } 3263 3264 int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset, 3265 QEMUIOVector *qiov, 3266 BdrvRequestFlags flags) 3267 { 3268 int ret; 3269 BlockDriver *drv = bs->drv; 3270 CoroutineIOCompletion co = { 3271 .coroutine = qemu_coroutine_self(), 3272 }; 3273 IO_CODE(); 3274 3275 ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL); 3276 if (ret < 0) { 3277 return ret; 3278 } 3279 3280 bdrv_inc_in_flight(bs); 3281 if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) { 3282 co.ret = -ENOTSUP; 3283 goto out; 3284 } 3285 co.ret = 
drv->bdrv_co_zone_append(bs, offset, qiov, flags); 3286 out: 3287 bdrv_dec_in_flight(bs); 3288 return co.ret; 3289 } 3290 3291 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3292 { 3293 IO_CODE(); 3294 return qemu_memalign(bdrv_opt_mem_align(bs), size); 3295 } 3296 3297 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 3298 { 3299 IO_CODE(); 3300 return memset(qemu_blockalign(bs, size), 0, size); 3301 } 3302 3303 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 3304 { 3305 size_t align = bdrv_opt_mem_align(bs); 3306 IO_CODE(); 3307 3308 /* Ensure that NULL is never returned on success */ 3309 assert(align > 0); 3310 if (size == 0) { 3311 size = align; 3312 } 3313 3314 return qemu_try_memalign(align, size); 3315 } 3316 3317 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 3318 { 3319 void *mem = qemu_try_blockalign(bs, size); 3320 IO_CODE(); 3321 3322 if (mem) { 3323 memset(mem, 0, size); 3324 } 3325 3326 return mem; 3327 } 3328 3329 /* Helper that undoes bdrv_register_buf() when it fails partway through */ 3330 static void GRAPH_RDLOCK 3331 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size, 3332 BdrvChild *final_child) 3333 { 3334 BdrvChild *child; 3335 3336 GLOBAL_STATE_CODE(); 3337 assert_bdrv_graph_readable(); 3338 3339 QLIST_FOREACH(child, &bs->children, next) { 3340 if (child == final_child) { 3341 break; 3342 } 3343 3344 bdrv_unregister_buf(child->bs, host, size); 3345 } 3346 3347 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3348 bs->drv->bdrv_unregister_buf(bs, host, size); 3349 } 3350 } 3351 3352 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, 3353 Error **errp) 3354 { 3355 BdrvChild *child; 3356 3357 GLOBAL_STATE_CODE(); 3358 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3359 3360 if (bs->drv && bs->drv->bdrv_register_buf) { 3361 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { 3362 return false; 3363 } 3364 } 3365 QLIST_FOREACH(child, &bs->children, next) { 3366 if (!bdrv_register_buf(child->bs, host, size, errp)) { 3367 bdrv_register_buf_rollback(bs, host, size, child); 3368 return false; 3369 } 3370 } 3371 return true; 3372 } 3373 3374 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size) 3375 { 3376 BdrvChild *child; 3377 3378 GLOBAL_STATE_CODE(); 3379 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3380 3381 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3382 bs->drv->bdrv_unregister_buf(bs, host, size); 3383 } 3384 QLIST_FOREACH(child, &bs->children, next) { 3385 bdrv_unregister_buf(child->bs, host, size); 3386 } 3387 } 3388 3389 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal( 3390 BdrvChild *src, int64_t src_offset, BdrvChild *dst, 3391 int64_t dst_offset, int64_t bytes, 3392 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3393 bool recurse_src) 3394 { 3395 BdrvTrackedRequest req; 3396 int ret; 3397 assert_bdrv_graph_readable(); 3398 3399 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3400 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3401 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3402 assert(!(read_flags & BDRV_REQ_NO_WAIT)); 3403 assert(!(write_flags & BDRV_REQ_NO_WAIT)); 3404 3405 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { 3406 return -ENOMEDIUM; 3407 } 3408 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0); 3409 if (ret) { 3410 return ret; 3411 } 3412 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3413 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3414 } 3415 3416 if (!src || !src->bs || 
!bdrv_co_is_inserted(src->bs)) { 3417 return -ENOMEDIUM; 3418 } 3419 ret = bdrv_check_request32(src_offset, bytes, NULL, 0); 3420 if (ret) { 3421 return ret; 3422 } 3423 3424 if (!src->bs->drv->bdrv_co_copy_range_from 3425 || !dst->bs->drv->bdrv_co_copy_range_to 3426 || src->bs->encrypted || dst->bs->encrypted) { 3427 return -ENOTSUP; 3428 } 3429 3430 if (recurse_src) { 3431 bdrv_inc_in_flight(src->bs); 3432 tracked_request_begin(&req, src->bs, src_offset, bytes, 3433 BDRV_TRACKED_READ); 3434 3435 /* BDRV_REQ_SERIALISING is only for write operation */ 3436 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3437 bdrv_wait_serialising_requests(&req); 3438 3439 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3440 src, src_offset, 3441 dst, dst_offset, 3442 bytes, 3443 read_flags, write_flags); 3444 3445 tracked_request_end(&req); 3446 bdrv_dec_in_flight(src->bs); 3447 } else { 3448 bdrv_inc_in_flight(dst->bs); 3449 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3450 BDRV_TRACKED_WRITE); 3451 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3452 write_flags); 3453 if (!ret) { 3454 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3455 src, src_offset, 3456 dst, dst_offset, 3457 bytes, 3458 read_flags, write_flags); 3459 } 3460 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3461 tracked_request_end(&req); 3462 bdrv_dec_in_flight(dst->bs); 3463 } 3464 3465 return ret; 3466 } 3467 3468 /* Copy range from @src to @dst. 3469 * 3470 * See the comment of bdrv_co_copy_range for the parameter and return value 3471 * semantics. */ 3472 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset, 3473 BdrvChild *dst, int64_t dst_offset, 3474 int64_t bytes, 3475 BdrvRequestFlags read_flags, 3476 BdrvRequestFlags write_flags) 3477 { 3478 IO_CODE(); 3479 assert_bdrv_graph_readable(); 3480 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3481 read_flags, write_flags); 3482 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3483 bytes, read_flags, write_flags, true); 3484 } 3485 3486 /* Copy range from @src to @dst. 3487 * 3488 * See the comment of bdrv_co_copy_range for the parameter and return value 3489 * semantics. 
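*
* A minimal illustrative call (coroutine context; src_child and
* dst_child are hypothetical BdrvChild pointers, and 1048576 is just an
* example length):
*
*     ret = bdrv_co_copy_range_to(src_child, 0, dst_child, 0,
*                                 1048576, 0, 0);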
*/ 3490 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset, 3491 BdrvChild *dst, int64_t dst_offset, 3492 int64_t bytes, 3493 BdrvRequestFlags read_flags, 3494 BdrvRequestFlags write_flags) 3495 { 3496 IO_CODE(); 3497 assert_bdrv_graph_readable(); 3498 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3499 read_flags, write_flags); 3500 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3501 bytes, read_flags, write_flags, false); 3502 } 3503 3504 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, 3505 BdrvChild *dst, int64_t dst_offset, 3506 int64_t bytes, BdrvRequestFlags read_flags, 3507 BdrvRequestFlags write_flags) 3508 { 3509 IO_CODE(); 3510 assert_bdrv_graph_readable(); 3511 3512 return bdrv_co_copy_range_from(src, src_offset, 3513 dst, dst_offset, 3514 bytes, read_flags, write_flags); 3515 } 3516 3517 static void coroutine_fn GRAPH_RDLOCK 3518 bdrv_parent_cb_resize(BlockDriverState *bs) 3519 { 3520 BdrvChild *c; 3521 3522 assert_bdrv_graph_readable(); 3523 3524 QLIST_FOREACH(c, &bs->parents, next_parent) { 3525 if (c->klass->resize) { 3526 c->klass->resize(c); 3527 } 3528 } 3529 } 3530 3531 /** 3532 * Truncate file to 'offset' bytes (needed only for file protocols) 3533 * 3534 * If 'exact' is true, the file must be resized to exactly the given 3535 * 'offset'. Otherwise, it is sufficient for the node to be at least 3536 * 'offset' bytes in length. 3537 */ 3538 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3539 PreallocMode prealloc, BdrvRequestFlags flags, 3540 Error **errp) 3541 { 3542 BlockDriverState *bs = child->bs; 3543 BdrvChild *filtered, *backing; 3544 BlockDriver *drv = bs->drv; 3545 BdrvTrackedRequest req; 3546 int64_t old_size, new_bytes; 3547 int ret; 3548 IO_CODE(); 3549 assert_bdrv_graph_readable(); 3550 3551 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3552 if (!drv) { 3553 error_setg(errp, "No medium inserted"); 3554 return -ENOMEDIUM; 3555 } 3556 if (offset < 0) { 3557 error_setg(errp, "Image size cannot be negative"); 3558 return -EINVAL; 3559 } 3560 3561 ret = bdrv_check_request(offset, 0, errp); 3562 if (ret < 0) { 3563 return ret; 3564 } 3565 3566 old_size = bdrv_co_getlength(bs); 3567 if (old_size < 0) { 3568 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3569 return old_size; 3570 } 3571 3572 if (bdrv_is_read_only(bs)) { 3573 error_setg(errp, "Image is read-only"); 3574 return -EACCES; 3575 } 3576 3577 if (offset > old_size) { 3578 new_bytes = offset - old_size; 3579 } else { 3580 new_bytes = 0; 3581 } 3582 3583 bdrv_inc_in_flight(bs); 3584 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3585 BDRV_TRACKED_TRUNCATE); 3586 3587 /* If we are growing the image and potentially using preallocation for the 3588 * new area, we need to make sure that no write requests are made to it 3589 * concurrently or they might be overwritten by preallocation. 
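* As a hypothetical example: when growing from 1 MiB to 2 MiB with
* PREALLOC_MODE_FULL, a concurrent guest write at offset 1.5 MiB could
* otherwise be overwritten by the preallocation pass.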
*/ 3590 if (new_bytes) { 3591 bdrv_make_request_serialising(&req, 1); 3592 } 3593 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3594 0); 3595 if (ret < 0) { 3596 error_setg_errno(errp, -ret, 3597 "Failed to prepare request for truncation"); 3598 goto out; 3599 } 3600 3601 filtered = bdrv_filter_child(bs); 3602 backing = bdrv_cow_child(bs); 3603 3604 /* 3605 * If the image has a backing file that is large enough that it would 3606 * provide data for the new area, we cannot leave it unallocated because 3607 * then the backing file content would become visible. Instead, zero-fill 3608 * the new area. 3609 * 3610 * Note that if the image has a backing file, but was opened without the 3611 * backing file, taking care of keeping things consistent with that backing 3612 * file is the user's responsibility. 3613 */ 3614 if (new_bytes && backing) { 3615 int64_t backing_len; 3616 3617 backing_len = bdrv_co_getlength(backing->bs); 3618 if (backing_len < 0) { 3619 ret = backing_len; 3620 error_setg_errno(errp, -ret, "Could not get backing file size"); 3621 goto out; 3622 } 3623 3624 if (backing_len > old_size) { 3625 flags |= BDRV_REQ_ZERO_WRITE; 3626 } 3627 } 3628 3629 if (drv->bdrv_co_truncate) { 3630 if (flags & ~bs->supported_truncate_flags) { 3631 error_setg(errp, "Block driver does not support requested flags"); 3632 ret = -ENOTSUP; 3633 goto out; 3634 } 3635 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3636 } else if (filtered) { 3637 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3638 } else { 3639 error_setg(errp, "Image format driver does not support resize"); 3640 ret = -ENOTSUP; 3641 goto out; 3642 } 3643 if (ret < 0) { 3644 goto out; 3645 } 3646 3647 ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3648 if (ret < 0) { 3649 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3650 } else { 3651 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3652 } 3653 /* 3654 * It's possible that truncation succeeded but bdrv_refresh_total_sectors 3655 * failed, but the latter doesn't affect how we should finish the request. 3656 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. 
3657 */ 3658 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3659 3660 out: 3661 tracked_request_end(&req); 3662 bdrv_dec_in_flight(bs); 3663 3664 return ret; 3665 } 3666 3667 void bdrv_cancel_in_flight(BlockDriverState *bs) 3668 { 3669 GLOBAL_STATE_CODE(); 3670 GRAPH_RDLOCK_GUARD_MAINLOOP(); 3671 3672 if (!bs || !bs->drv) { 3673 return; 3674 } 3675 3676 if (bs->drv->bdrv_cancel_in_flight) { 3677 bs->drv->bdrv_cancel_in_flight(bs); 3678 } 3679 } 3680 3681 int coroutine_fn 3682 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, 3683 QEMUIOVector *qiov, size_t qiov_offset) 3684 { 3685 BlockDriverState *bs = child->bs; 3686 BlockDriver *drv = bs->drv; 3687 int ret; 3688 IO_CODE(); 3689 assert_bdrv_graph_readable(); 3690 3691 if (!drv) { 3692 return -ENOMEDIUM; 3693 } 3694 3695 if (!drv->bdrv_co_preadv_snapshot) { 3696 return -ENOTSUP; 3697 } 3698 3699 bdrv_inc_in_flight(bs); 3700 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); 3701 bdrv_dec_in_flight(bs); 3702 3703 return ret; 3704 } 3705 3706 int coroutine_fn 3707 bdrv_co_snapshot_block_status(BlockDriverState *bs, 3708 bool want_zero, int64_t offset, int64_t bytes, 3709 int64_t *pnum, int64_t *map, 3710 BlockDriverState **file) 3711 { 3712 BlockDriver *drv = bs->drv; 3713 int ret; 3714 IO_CODE(); 3715 assert_bdrv_graph_readable(); 3716 3717 if (!drv) { 3718 return -ENOMEDIUM; 3719 } 3720 3721 if (!drv->bdrv_co_snapshot_block_status) { 3722 return -ENOTSUP; 3723 } 3724 3725 bdrv_inc_in_flight(bs); 3726 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, 3727 pnum, map, file); 3728 bdrv_dec_in_flight(bs); 3729 3730 return ret; 3731 } 3732 3733 int coroutine_fn 3734 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) 3735 { 3736 BlockDriver *drv = bs->drv; 3737 int ret; 3738 IO_CODE(); 3739 assert_bdrv_graph_readable(); 3740 3741 if (!drv) { 3742 return -ENOMEDIUM; 3743 } 3744 3745 if (!drv->bdrv_co_pdiscard_snapshot) { 3746 return -ENOTSUP; 3747 } 3748 3749 bdrv_inc_in_flight(bs); 3750 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); 3751 bdrv_dec_in_flight(bs); 3752 3753 return ret; 3754 } 3755
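
/*
 * Illustrative usage sketch (kept as a comment; all names other than the
 * bdrv_* functions above are hypothetical, and graph-lock/drain
 * preconditions are omitted): a coroutine that zeroes a region, checks
 * its status, then flushes.
 *
 *     static int coroutine_fn example_zero_status_flush(BdrvChild *child)
 *     {
 *         int64_t pnum = 0;
 *         int ret = bdrv_co_pwrite_zeroes(child, 0, 65536,
 *                                         BDRV_REQ_MAY_UNMAP);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         ret = bdrv_co_block_status(child->bs, 0, 65536, &pnum,
 *                                    NULL, NULL);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         return bdrv_co_flush(child->bs);
 *     }
 */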