/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
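/*
 * Illustration (hypothetical numbers): bdrv_merge_limits() combines limits so
 * that the merged result satisfies all children at once.  Hard caps shrink
 * (MIN_NON_ZERO, where 0 means "no limit") while alignment requirements grow
 * (MAX).  For example, merging a child with max_transfer = 0 (unlimited) and
 * opt_mem_alignment = 4096 into limits with max_transfer = 65536 and
 * opt_mem_alignment = 512 yields max_transfer = 65536 and
 * opt_mem_alignment = 4096.
 */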
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}
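/*
 * Typical usage of a drained section (hypothetical caller): quiesce a node
 * before modifying the graph around it, then resume I/O.  Between begin and
 * end, no new requests from parents are accepted and all in-flight requests
 * have completed:
 *
 *     bdrv_drained_begin(bs);
 *     ... reconfigure or detach bs safely ...
 *     bdrv_drained_end(bs);
 */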
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the AioContext of
 * the BlockDriverState.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the in-flight I/O requests to
     * finish could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the in-flight I/O requests to
     * finish could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
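/*
 * Typical pairing (hypothetical caller, modelled on a VM-stop path): as the
 * comment on bdrv_drain_all_begin() notes, draining by itself does not push
 * data to disk, so a caller that wants everything on disk drains first and
 * then flushes:
 *
 *     bdrv_drain_all();        // begin + end
 *     ret = bdrv_flush_all();
 */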
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
                                      BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);
    return waited;
}

bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    BlockDriverState *bs = req->bs;
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;
    bool waited;

    qemu_co_mutex_lock(&bs->reqs_lock);
    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
    waited = bdrv_wait_serialising_requests_locked(bs, req);
    qemu_co_mutex_unlock(&bs->reqs_lock);
    return waited;
}
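/*
 * Worked example (hypothetical numbers): for a request with offset = 5000 and
 * bytes = 2000, bdrv_mark_request_serialising(req, 4096) widens the overlap
 * window to the enclosing aligned region: overlap_offset = 5000 & ~4095 = 4096
 * and overlap_bytes = ROUND_UP(7000, 4096) - 4096 = 8192 - 4096 = 4096.  Any
 * other serialising request touching [4096, 8192) is then considered
 * overlapping and is waited for.
 */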
/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
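/*
 * Worked example (hypothetical numbers): with cluster_size = 65536, a region
 * offset = 70000, bytes = 1000 rounds to cluster_offset = 65536 and
 * cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536, i.e. the
 * single cluster [65536, 131072) fully covering the request.
 */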
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(bs, self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}
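/*
 * Usage sketch (hypothetical caller): reading a fixed-size header with the
 * synchronous wrappers; both bdrv_pread() and bdrv_pwrite() return the
 * requested byte count on success and a negative errno on failure:
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;    // -EIO, -ENOMEDIUM, -EINVAL, ...
 *     }
 */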
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;
    bool skip_write;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment.
     */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining, 0);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*---------- ... ------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf ... )                               [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    uint64_t align = bs->bl.request_alignment;
    size_t sum;

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}

static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
                                 bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        uint64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
}
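/*
 * Worked example for bdrv_init_padding() above (hypothetical numbers): with
 * request_alignment = 512, a request offset = 700, bytes = 1000 gets
 * head = 700 & 511 = 188 and tail = 512 - (1700 & 511) = 348.
 * sum = 188 + 1000 + 348 = 1536 spans more than one alignment chunk and both
 * paddings exist, so buf_len = 2 * 512 and merge_reads is false: head and
 * tail are read back in two separate align-sized RMW reads.
 */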
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * All parameters except @bs are in-out: they represent the original request at
 * function call and the padded request (if padding is needed) when the
 * function returns.
 *
 * Function always succeeds.
 */
static bool bdrv_pad_request(BlockDriverState *bs,
                             QEMUIOVector **qiov, size_t *qiov_offset,
                             int64_t *offset, unsigned int *bytes,
                             BdrvRequestPadding *pad)
{
    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        return false;
    }

    qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                             *qiov, *qiov_offset, *bytes,
                             pad->buf + pad->buf_len - pad->tail, pad->tail);
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;

    return true;
}

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv(bs, offset, bytes, flags);

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if a driver assigns
         * special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part does), we can't pass an unaligned
         * one to it because of request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad);

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);

    return ret;
}

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
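/*
 * Worked example (hypothetical numbers): with alignment = 4096, a zero-write
 * at offset = 5000, bytes = 200000 is fragmented by the loop above as
 * follows: head = 5000 % 4096 = 904, so the first iteration issues a small
 * request of at most 4096 - 904 = 3192 bytes up to the aligned boundary at
 * 8192; the bulk is then written in aligned chunks; finally, since
 * tail = 205000 % 4096 = 200, the last 200 bytes are issued as a separate
 * short request.
 */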
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;
    bool waited;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);

    if (bs->read_only) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));

    if (flags & BDRV_REQ_SERIALISING) {
        waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
        /*
         * For a misaligned request we should have already waited earlier,
         * because we come after bdrv_padding_rmw_read which must be called
         * with the request already marked as serialising.
         */
        assert(!waited ||
               (req->offset == req->overlap_offset &&
                req->bytes == req->overlap_bytes));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
                                                req);
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    uint64_t bytes_remaining = bytes;
    int max_transfer;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || qiov_offset + bytes <= qiov->size);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}

static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    int ret = 0;
    bool padding;
    BdrvRequestPadding pad;

    padding = bdrv_init_padding(bs, offset, bytes, &pad);
    if (padding) {
        bdrv_mark_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
pad.buf_len : align; 1993 1994 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); 1995 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, 1996 align, &local_qiov, 0, 1997 flags & ~BDRV_REQ_ZERO_WRITE); 1998 if (ret < 0 || pad.merge_reads) { 1999 /* Error or all work is done */ 2000 goto out; 2001 } 2002 offset += write_bytes - pad.head; 2003 bytes -= write_bytes - pad.head; 2004 } 2005 } 2006 2007 assert(!bytes || (offset & (align - 1)) == 0); 2008 if (bytes >= align) { 2009 /* Write the aligned part in the middle. */ 2010 uint64_t aligned_bytes = bytes & ~(align - 1); 2011 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 2012 NULL, 0, flags); 2013 if (ret < 0) { 2014 goto out; 2015 } 2016 bytes -= aligned_bytes; 2017 offset += aligned_bytes; 2018 } 2019 2020 assert(!bytes || (offset & (align - 1)) == 0); 2021 if (bytes) { 2022 assert(align == pad.tail + bytes); 2023 2024 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); 2025 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 2026 &local_qiov, 0, 2027 flags & ~BDRV_REQ_ZERO_WRITE); 2028 } 2029 2030 out: 2031 bdrv_padding_destroy(&pad); 2032 2033 return ret; 2034 } 2035 2036 /* 2037 * Handle a write request in coroutine context 2038 */ 2039 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 2040 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 2041 BdrvRequestFlags flags) 2042 { 2043 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); 2044 } 2045 2046 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, 2047 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset, 2048 BdrvRequestFlags flags) 2049 { 2050 BlockDriverState *bs = child->bs; 2051 BdrvTrackedRequest req; 2052 uint64_t align = bs->bl.request_alignment; 2053 BdrvRequestPadding pad; 2054 int ret; 2055 2056 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags); 2057 2058 if (!bs->drv) { 2059 return -ENOMEDIUM; 2060 } 2061 2062 ret = bdrv_check_byte_request(bs, offset, bytes); 2063 if (ret < 0) { 2064 return ret; 2065 } 2066 2067 /* If the request is misaligned then we can't make it efficient */ 2068 if ((flags & BDRV_REQ_NO_FALLBACK) && 2069 !QEMU_IS_ALIGNED(offset | bytes, align)) 2070 { 2071 return -ENOTSUP; 2072 } 2073 2074 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { 2075 /* 2076 * Aligning a zero-length request is nonsense. Even if the driver gives 2077 * special meaning to zero-length requests (like qcow2_co_pwritev_compressed_part), 2078 * we can't pass such a request to the driver due to request_alignment. 2079 * 2080 * Still, there is no reason to return an error if someone occasionally 2081 * does an unaligned zero-length write. 2082 */ 2083 return 0; 2084 } 2085 2086 bdrv_inc_in_flight(bs); 2087 /* 2088 * Align write if necessary by performing a read-modify-write cycle. 2089 * Pad qiov with the read parts and be sure to have a tracked request not 2090 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
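 *
 * Worked example (hypothetical values): with request_alignment = 512, a
 * 1000-byte write at offset 700 becomes the aligned request [512, 2048):
 * the 188-byte head (700 - 512) and the 348-byte tail (2048 - 1700) are
 * first read back, and the combined 1536-byte buffer is then written out
 * in one aligned request.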
2091 */ 2092 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 2093 2094 if (flags & BDRV_REQ_ZERO_WRITE) { 2095 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); 2096 goto out; 2097 } 2098 2099 if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) { 2100 bdrv_mark_request_serialising(&req, align); 2101 bdrv_padding_rmw_read(child, &req, &pad, false); 2102 } 2103 2104 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, 2105 qiov, qiov_offset, flags); 2106 2107 bdrv_padding_destroy(&pad); 2108 2109 out: 2110 tracked_request_end(&req); 2111 bdrv_dec_in_flight(bs); 2112 2113 return ret; 2114 } 2115 2116 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, 2117 int bytes, BdrvRequestFlags flags) 2118 { 2119 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); 2120 2121 if (!(child->bs->open_flags & BDRV_O_UNMAP)) { 2122 flags &= ~BDRV_REQ_MAY_UNMAP; 2123 } 2124 2125 return bdrv_co_pwritev(child, offset, bytes, NULL, 2126 BDRV_REQ_ZERO_WRITE | flags); 2127 } 2128 2129 /* 2130 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend or not. 2131 */ 2132 int bdrv_flush_all(void) 2133 { 2134 BdrvNextIterator it; 2135 BlockDriverState *bs = NULL; 2136 int result = 0; 2137 2138 /* 2139 * The bdrv queue is managed by record/replay; 2140 * creating a new flush request for stopping 2141 * the VM may break determinism 2142 */ 2143 if (replay_events_enabled()) { 2144 return result; 2145 } 2146 2147 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { 2148 AioContext *aio_context = bdrv_get_aio_context(bs); 2149 int ret; 2150 2151 aio_context_acquire(aio_context); 2152 ret = bdrv_flush(bs); 2153 if (ret < 0 && !result) { 2154 result = ret; 2155 } 2156 aio_context_release(aio_context); 2157 } 2158 2159 return result; 2160 } 2161 2162 /* 2163 * Returns the allocation status of the specified byte range. 2164 * Drivers not implementing the functionality are assumed to not support 2165 * backing files, hence all their sectors are reported as allocated. 2166 * 2167 * If 'want_zero' is true, the caller is querying for mapping 2168 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and 2169 * _ZERO where possible; otherwise, the result favors larger 'pnum', 2170 * with a focus on accurate BDRV_BLOCK_ALLOCATED. 2171 * 2172 * If 'offset' is beyond the end of the disk image the return value is 2173 * BDRV_BLOCK_EOF and 'pnum' is set to 0. 2174 * 2175 * 'bytes' is the max value 'pnum' should be set to. If bytes goes 2176 * beyond the end of the disk image it will be clamped; if 'pnum' is set to 2177 * the end of the image, then the returned value will include BDRV_BLOCK_EOF. 2178 * 2179 * 'pnum' is set to the number of bytes (including and immediately 2180 * following the specified offset) that are easily known to be in the 2181 * same allocated/unallocated state. Note that a second call starting 2182 * at the original offset plus returned pnum may have the same status. 2183 * The returned value is non-zero on success except at end-of-file. 2184 * 2185 * Returns negative errno on failure. Otherwise, if the 2186 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are 2187 * set to the host mapping and BDS corresponding to the guest offset.
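 *
 * An illustrative caller loop over the public wrapper (a sketch, not a
 * normative usage pattern) walks an image extent by extent:
 *
 *     int64_t offset = 0, pnum, map;
 *     BlockDriverState *file;
 *     while (offset < total_size) {
 *         int ret = bdrv_block_status(bs, offset, total_size - offset,
 *                                     &pnum, &map, &file);
 *         if (ret < 0) {
 *             break;  /* error */
 *         }
 *         /* [offset, offset + pnum) shares the status bits in ret */
 *         offset += pnum;
 *     }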
2188 */ 2189 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, 2190 bool want_zero, 2191 int64_t offset, int64_t bytes, 2192 int64_t *pnum, int64_t *map, 2193 BlockDriverState **file) 2194 { 2195 int64_t total_size; 2196 int64_t n; /* bytes */ 2197 int ret; 2198 int64_t local_map = 0; 2199 BlockDriverState *local_file = NULL; 2200 int64_t aligned_offset, aligned_bytes; 2201 uint32_t align; 2202 bool has_filtered_child; 2203 2204 assert(pnum); 2205 *pnum = 0; 2206 total_size = bdrv_getlength(bs); 2207 if (total_size < 0) { 2208 ret = total_size; 2209 goto early_out; 2210 } 2211 2212 if (offset >= total_size) { 2213 ret = BDRV_BLOCK_EOF; 2214 goto early_out; 2215 } 2216 if (!bytes) { 2217 ret = 0; 2218 goto early_out; 2219 } 2220 2221 n = total_size - offset; 2222 if (n < bytes) { 2223 bytes = n; 2224 } 2225 2226 /* Must be non-NULL or bdrv_getlength() would have failed */ 2227 assert(bs->drv); 2228 has_filtered_child = bdrv_filter_child(bs); 2229 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { 2230 *pnum = bytes; 2231 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2232 if (offset + bytes == total_size) { 2233 ret |= BDRV_BLOCK_EOF; 2234 } 2235 if (bs->drv->protocol_name) { 2236 ret |= BDRV_BLOCK_OFFSET_VALID; 2237 local_map = offset; 2238 local_file = bs; 2239 } 2240 goto early_out; 2241 } 2242 2243 bdrv_inc_in_flight(bs); 2244 2245 /* Round out to request_alignment boundaries */ 2246 align = bs->bl.request_alignment; 2247 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2248 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2249 2250 if (bs->drv->bdrv_co_block_status) { 2251 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2252 aligned_bytes, pnum, &local_map, 2253 &local_file); 2254 } else { 2255 /* Default code for filters */ 2256 2257 local_file = bdrv_filter_bs(bs); 2258 assert(local_file); 2259 2260 *pnum = aligned_bytes; 2261 local_map = aligned_offset; 2262 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; 2263 } 2264 if (ret < 0) { 2265 *pnum = 0; 2266 goto out; 2267 } 2268 2269 /* 2270 * The driver's result must be a non-zero multiple of request_alignment. 2271 * Clamp pnum and adjust map to original request. 
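 * For example (hypothetical values): with align = 512, offset = 700 and
 * aligned_offset = 512, a driver answer of *pnum = 1536 is first reduced
 * by the 188 bytes of rounding (offset - aligned_offset) to 1348 and then
 * clamped to the caller's 'bytes'; local_map advances by the same 188
 * bytes.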
2272 */ 2273 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2274 align > offset - aligned_offset); 2275 if (ret & BDRV_BLOCK_RECURSE) { 2276 assert(ret & BDRV_BLOCK_DATA); 2277 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2278 assert(!(ret & BDRV_BLOCK_ZERO)); 2279 } 2280 2281 *pnum -= offset - aligned_offset; 2282 if (*pnum > bytes) { 2283 *pnum = bytes; 2284 } 2285 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2286 local_map += offset - aligned_offset; 2287 } 2288 2289 if (ret & BDRV_BLOCK_RAW) { 2290 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2291 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2292 *pnum, pnum, &local_map, &local_file); 2293 goto out; 2294 } 2295 2296 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2297 ret |= BDRV_BLOCK_ALLOCATED; 2298 } else if (bs->drv->supports_backing) { 2299 BlockDriverState *cow_bs = bdrv_cow_bs(bs); 2300 2301 if (!cow_bs) { 2302 ret |= BDRV_BLOCK_ZERO; 2303 } else if (want_zero) { 2304 int64_t size2 = bdrv_getlength(cow_bs); 2305 2306 if (size2 >= 0 && offset >= size2) { 2307 ret |= BDRV_BLOCK_ZERO; 2308 } 2309 } 2310 } 2311 2312 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2313 local_file && local_file != bs && 2314 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2315 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2316 int64_t file_pnum; 2317 int ret2; 2318 2319 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2320 *pnum, &file_pnum, NULL, NULL); 2321 if (ret2 >= 0) { 2322 /* Ignore errors. This is just providing extra information, it 2323 * is useful but not necessary. 2324 */ 2325 if (ret2 & BDRV_BLOCK_EOF && 2326 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2327 /* 2328 * It is valid for the format block driver to read 2329 * beyond the end of the underlying file's current 2330 * size; such areas read as zero. 
2331 */ 2332 ret |= BDRV_BLOCK_ZERO; 2333 } else { 2334 /* Limit request to the range reported by the protocol driver */ 2335 *pnum = file_pnum; 2336 ret |= (ret2 & BDRV_BLOCK_ZERO); 2337 } 2338 } 2339 } 2340 2341 out: 2342 bdrv_dec_in_flight(bs); 2343 if (ret >= 0 && offset + *pnum == total_size) { 2344 ret |= BDRV_BLOCK_EOF; 2345 } 2346 early_out: 2347 if (file) { 2348 *file = local_file; 2349 } 2350 if (map) { 2351 *map = local_map; 2352 } 2353 return ret; 2354 } 2355 2356 int coroutine_fn 2357 bdrv_co_common_block_status_above(BlockDriverState *bs, 2358 BlockDriverState *base, 2359 bool include_base, 2360 bool want_zero, 2361 int64_t offset, 2362 int64_t bytes, 2363 int64_t *pnum, 2364 int64_t *map, 2365 BlockDriverState **file, 2366 int *depth) 2367 { 2368 int ret; 2369 BlockDriverState *p; 2370 int64_t eof = 0; 2371 int dummy; 2372 2373 assert(!include_base || base); /* Can't include NULL base */ 2374 2375 if (!depth) { 2376 depth = &dummy; 2377 } 2378 *depth = 0; 2379 2380 if (!include_base && bs == base) { 2381 *pnum = bytes; 2382 return 0; 2383 } 2384 2385 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file); 2386 ++*depth; 2387 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { 2388 return ret; 2389 } 2390 2391 if (ret & BDRV_BLOCK_EOF) { 2392 eof = offset + *pnum; 2393 } 2394 2395 assert(*pnum <= bytes); 2396 bytes = *pnum; 2397 2398 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; 2399 p = bdrv_filter_or_cow_bs(p)) 2400 { 2401 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, 2402 file); 2403 ++*depth; 2404 if (ret < 0) { 2405 return ret; 2406 } 2407 if (*pnum == 0) { 2408 /* 2409 * The top layer deferred to this layer, and because this layer is 2410 * short, any zeroes that we synthesize beyond EOF behave as if they 2411 * were allocated at this layer. 2412 * 2413 * We don't include BDRV_BLOCK_EOF in ret, as the upper layer may be 2414 * larger. We'll add BDRV_BLOCK_EOF if needed at the end of the 2415 * function; see below. 2416 */ 2417 assert(ret & BDRV_BLOCK_EOF); 2418 *pnum = bytes; 2419 if (file) { 2420 *file = p; 2421 } 2422 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED; 2423 break; 2424 } 2425 if (ret & BDRV_BLOCK_ALLOCATED) { 2426 /* 2427 * We've found the node and the status; we must break. 2428 * 2429 * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which may be 2430 * larger. We'll add BDRV_BLOCK_EOF if needed at the end of the 2431 * function; see below. 2432 */ 2433 ret &= ~BDRV_BLOCK_EOF; 2434 break; 2435 } 2436 2437 if (p == base) { 2438 assert(include_base); 2439 break; 2440 } 2441 2442 /* 2443 * OK, the [offset, offset + *pnum) region is unallocated on this layer; 2444 * let's continue diving.
2445 */ 2446 assert(*pnum <= bytes); 2447 bytes = *pnum; 2448 } 2449 2450 if (offset + *pnum == eof) { 2451 ret |= BDRV_BLOCK_EOF; 2452 } 2453 2454 return ret; 2455 } 2456 2457 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2458 int64_t offset, int64_t bytes, int64_t *pnum, 2459 int64_t *map, BlockDriverState **file) 2460 { 2461 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, 2462 pnum, map, file, NULL); 2463 } 2464 2465 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2466 int64_t *pnum, int64_t *map, BlockDriverState **file) 2467 { 2468 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), 2469 offset, bytes, pnum, map, file); 2470 } 2471 2472 /* 2473 * Check @bs (and its backing chain) to see if the range defined 2474 * by @offset and @bytes is known to read as zeroes. 2475 * Return 1 if that is the case, 0 otherwise, and -errno on error. 2476 * This test is meant to be fast rather than accurate, so returning 0 2477 * does not guarantee non-zero data. 2478 */ 2479 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, 2480 int64_t bytes) 2481 { 2482 int ret; 2483 int64_t pnum = bytes; 2484 2485 if (!bytes) { 2486 return 1; 2487 } 2488 2489 ret = bdrv_common_block_status_above(bs, NULL, false, false, offset, 2490 bytes, &pnum, NULL, NULL, NULL); 2491 2492 if (ret < 0) { 2493 return ret; 2494 } 2495 2496 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); 2497 } 2498 2499 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, 2500 int64_t bytes, int64_t *pnum) 2501 { 2502 int ret; 2503 int64_t dummy; 2504 2505 ret = bdrv_common_block_status_above(bs, bs, true, false, offset, 2506 bytes, pnum ? pnum : &dummy, NULL, 2507 NULL, NULL); 2508 if (ret < 0) { 2509 return ret; 2510 } 2511 return !!(ret & BDRV_BLOCK_ALLOCATED); 2512 } 2513 2514 /* 2515 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2516 * 2517 * Return a positive depth if (a prefix of) the given range is allocated 2518 * in any image between BASE and TOP (BASE is only included if include_base 2519 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. 2520 * BASE can be NULL to check if the given offset is allocated in any 2521 * image of the chain. Return 0 otherwise, or negative errno on 2522 * failure. 2523 * 2524 * 'pnum' is set to the number of bytes (including and immediately 2525 * following the specified offset) that are known to be in the same 2526 * allocated/unallocated state. Note that a subsequent call starting 2527 * at 'offset + *pnum' may return the same allocation status (in other 2528 * words, the result is not necessarily the maximum possible range); 2529 * but 'pnum' will only be 0 when the end of file is reached.
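 *
 * An illustrative caller (a sketch, not taken from a real user) that
 * reports which layer provides each extent:
 *
 *     int64_t offset = 0, pnum;
 *     while (offset < size) {
 *         int depth = bdrv_is_allocated_above(top, base, false, offset,
 *                                             size - offset, &pnum);
 *         if (depth < 0) {
 *             break;  /* error */
 *         }
 *         /* depth == 0: unallocated down to base; depth >= 1: layer */
 *         offset += pnum;
 *     }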
2530 */ 2531 int bdrv_is_allocated_above(BlockDriverState *top, 2532 BlockDriverState *base, 2533 bool include_base, int64_t offset, 2534 int64_t bytes, int64_t *pnum) 2535 { 2536 int depth; 2537 int ret = bdrv_common_block_status_above(top, base, include_base, false, 2538 offset, bytes, pnum, NULL, NULL, 2539 &depth); 2540 if (ret < 0) { 2541 return ret; 2542 } 2543 2544 if (ret & BDRV_BLOCK_ALLOCATED) { 2545 return depth; 2546 } 2547 return 0; 2548 } 2549 2550 int coroutine_fn 2551 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2552 { 2553 BlockDriver *drv = bs->drv; 2554 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2555 int ret = -ENOTSUP; 2556 2557 if (!drv) { 2558 return -ENOMEDIUM; 2559 } 2560 2561 bdrv_inc_in_flight(bs); 2562 2563 if (drv->bdrv_load_vmstate) { 2564 ret = drv->bdrv_load_vmstate(bs, qiov, pos); 2565 } else if (child_bs) { 2566 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); 2567 } 2568 2569 bdrv_dec_in_flight(bs); 2570 2571 return ret; 2572 } 2573 2574 int coroutine_fn 2575 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2576 { 2577 BlockDriver *drv = bs->drv; 2578 BlockDriverState *child_bs = bdrv_primary_bs(bs); 2579 int ret = -ENOTSUP; 2580 2581 if (!drv) { 2582 return -ENOMEDIUM; 2583 } 2584 2585 bdrv_inc_in_flight(bs); 2586 2587 if (drv->bdrv_save_vmstate) { 2588 ret = drv->bdrv_save_vmstate(bs, qiov, pos); 2589 } else if (child_bs) { 2590 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); 2591 } 2592 2593 bdrv_dec_in_flight(bs); 2594 2595 return ret; 2596 } 2597 2598 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2599 int64_t pos, int size) 2600 { 2601 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2602 int ret = bdrv_writev_vmstate(bs, &qiov, pos); 2603 2604 return ret < 0 ? ret : size; 2605 } 2606 2607 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2608 int64_t pos, int size) 2609 { 2610 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2611 int ret = bdrv_readv_vmstate(bs, &qiov, pos); 2612 2613 return ret < 0 ? ret : size; 2614 } 2615 2616 /**************************************************************/ 2617 /* async I/Os */ 2618 2619 void bdrv_aio_cancel(BlockAIOCB *acb) 2620 { 2621 qemu_aio_ref(acb); 2622 bdrv_aio_cancel_async(acb); 2623 while (acb->refcnt > 1) { 2624 if (acb->aiocb_info->get_aio_context) { 2625 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2626 } else if (acb->bs) { 2627 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2628 * assert that we're not using an I/O thread. Thread-safe 2629 * code should use bdrv_aio_cancel_async exclusively. 2630 */ 2631 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2632 aio_poll(bdrv_get_aio_context(acb->bs), true); 2633 } else { 2634 abort(); 2635 } 2636 } 2637 qemu_aio_unref(acb); 2638 } 2639 2640 /* Async version of aio cancel. The caller is not blocked if the acb implements 2641 * cancel_async; otherwise we do nothing and let the request complete normally. 2642 * In either case the completion callback must be called.
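 *
 * A driver opts in by filling the cancel_async hook of its AIOCBInfo (an
 * illustrative sketch; ExampleAIOCB and example_cancel_async are
 * hypothetical names):
 *
 *     static const AIOCBInfo example_aiocb_info = {
 *         .aiocb_size   = sizeof(ExampleAIOCB),
 *         .cancel_async = example_cancel_async,
 *     };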
*/ 2643 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2644 { 2645 if (acb->aiocb_info->cancel_async) { 2646 acb->aiocb_info->cancel_async(acb); 2647 } 2648 } 2649 2650 /**************************************************************/ 2651 /* Coroutine block device emulation */ 2652 2653 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2654 { 2655 BdrvChild *primary_child = bdrv_primary_child(bs); 2656 BdrvChild *child; 2657 int current_gen; 2658 int ret = 0; 2659 2660 bdrv_inc_in_flight(bs); 2661 2662 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 2663 bdrv_is_sg(bs)) { 2664 goto early_exit; 2665 } 2666 2667 qemu_co_mutex_lock(&bs->reqs_lock); 2668 current_gen = qatomic_read(&bs->write_gen); 2669 2670 /* Wait until any previous flushes are completed */ 2671 while (bs->active_flush_req) { 2672 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2673 } 2674 2675 /* Flushes reach this point in nondecreasing current_gen order. */ 2676 bs->active_flush_req = true; 2677 qemu_co_mutex_unlock(&bs->reqs_lock); 2678 2679 /* Write back all layers by calling one driver function */ 2680 if (bs->drv->bdrv_co_flush) { 2681 ret = bs->drv->bdrv_co_flush(bs); 2682 goto out; 2683 } 2684 2685 /* Write back cached data to the OS even with cache=unsafe */ 2686 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); 2687 if (bs->drv->bdrv_co_flush_to_os) { 2688 ret = bs->drv->bdrv_co_flush_to_os(bs); 2689 if (ret < 0) { 2690 goto out; 2691 } 2692 } 2693 2694 /* But don't actually force it to the disk with cache=unsafe */ 2695 if (bs->open_flags & BDRV_O_NO_FLUSH) { 2696 goto flush_children; 2697 } 2698 2699 /* Check if we really need to flush anything */ 2700 if (bs->flushed_gen == current_gen) { 2701 goto flush_children; 2702 } 2703 2704 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); 2705 if (!bs->drv) { 2706 /* bs->drv->bdrv_co_flush() might have ejected the BDS 2707 * (even in case of apparent success) */ 2708 ret = -ENOMEDIUM; 2709 goto out; 2710 } 2711 if (bs->drv->bdrv_co_flush_to_disk) { 2712 ret = bs->drv->bdrv_co_flush_to_disk(bs); 2713 } else if (bs->drv->bdrv_aio_flush) { 2714 BlockAIOCB *acb; 2715 CoroutineIOCompletion co = { 2716 .coroutine = qemu_coroutine_self(), 2717 }; 2718 2719 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 2720 if (acb == NULL) { 2721 ret = -EIO; 2722 } else { 2723 qemu_coroutine_yield(); 2724 ret = co.ret; 2725 } 2726 } else { 2727 /* 2728 * Some block drivers always operate in either writethrough or unsafe 2729 * mode and therefore don't support bdrv_flush. Usually qemu doesn't 2730 * know how the server works (because the behaviour is hardcoded or 2731 * depends on server-side configuration), so we can't ensure that 2732 * everything is safe on disk. Returning an error doesn't work because 2733 * that would break guests even if the server operates in writethrough 2734 * mode. 2735 * 2736 * Let's hope the user knows what they're doing. 2737 */ 2738 ret = 0; 2739 } 2740 2741 if (ret < 0) { 2742 goto out; 2743 } 2744 2745 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 2746 * in the case of cache=unsafe, so there are no useless flushes.
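 *
 * For a typical format-over-protocol chain (say qcow2 over file), the
 * resulting sequence is, illustratively:
 *
 *     qcow2: bdrv_co_flush_to_os()
 *     qcow2: bdrv_co_flush_to_disk()
 *     file:  bdrv_co_flush()   (via the child loop below)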
*/ 2748 flush_children: 2749 ret = 0; 2750 QLIST_FOREACH(child, &bs->children, next) { 2751 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { 2752 int this_child_ret = bdrv_co_flush(child->bs); 2753 if (!ret) { 2754 ret = this_child_ret; 2755 } 2756 } 2757 } 2758 2759 out: 2760 /* Notify any pending flushes that we have completed */ 2761 if (ret == 0) { 2762 bs->flushed_gen = current_gen; 2763 } 2764 2765 qemu_co_mutex_lock(&bs->reqs_lock); 2766 bs->active_flush_req = false; 2767 /* Return value is ignored - it's ok if wait queue is empty */ 2768 qemu_co_queue_next(&bs->flush_queue); 2769 qemu_co_mutex_unlock(&bs->reqs_lock); 2770 2771 early_exit: 2772 bdrv_dec_in_flight(bs); 2773 return ret; 2774 } 2775 2776 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 2777 int64_t bytes) 2778 { 2779 BdrvTrackedRequest req; 2780 int max_pdiscard, ret; 2781 int head, tail, align; 2782 BlockDriverState *bs = child->bs; 2783 2784 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) { 2785 return -ENOMEDIUM; 2786 } 2787 2788 if (bdrv_has_readonly_bitmaps(bs)) { 2789 return -EPERM; 2790 } 2791 2792 if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) { 2793 return -EIO; 2794 } 2795 2796 /* Do nothing if disabled. */ 2797 if (!(bs->open_flags & BDRV_O_UNMAP)) { 2798 return 0; 2799 } 2800 2801 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 2802 return 0; 2803 } 2804 2805 /* Discard is advisory, but some devices track and coalesce 2806 * unaligned requests, so we must pass everything down rather than 2807 * round here. Still, most devices will just reject 2808 * unaligned requests (by returning -ENOTSUP), so we must fragment 2809 * the request accordingly. */ 2810 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 2811 assert(align % bs->bl.request_alignment == 0); 2812 head = offset % align; 2813 tail = (offset + bytes) % align; 2814 2815 bdrv_inc_in_flight(bs); 2816 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 2817 2818 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 2819 if (ret < 0) { 2820 goto out; 2821 } 2822 2823 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), 2824 align); 2825 assert(max_pdiscard >= bs->bl.request_alignment); 2826 2827 while (bytes > 0) { 2828 int64_t num = bytes; 2829 2830 if (head) { 2831 /* Make small requests to get to alignment boundaries. */ 2832 num = MIN(bytes, align - head); 2833 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 2834 num %= bs->bl.request_alignment; 2835 } 2836 head = (head + num) % align; 2837 assert(num < max_pdiscard); 2838 } else if (tail) { 2839 if (num > align) { 2840 /* Shorten the request to the last aligned cluster.
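 * For example (hypothetical values): with align = 64 KiB and a 200 KiB
 * discard at offset 0, tail = 8 KiB, so the first request is shortened
 * to 192 KiB and the remaining 8 KiB tail is issued separately.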
*/ 2841 num -= tail; 2842 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 2843 tail > bs->bl.request_alignment) { 2844 tail %= bs->bl.request_alignment; 2845 num -= tail; 2846 } 2847 } 2848 /* limit request size */ 2849 if (num > max_pdiscard) { 2850 num = max_pdiscard; 2851 } 2852 2853 if (!bs->drv) { 2854 ret = -ENOMEDIUM; 2855 goto out; 2856 } 2857 if (bs->drv->bdrv_co_pdiscard) { 2858 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 2859 } else { 2860 BlockAIOCB *acb; 2861 CoroutineIOCompletion co = { 2862 .coroutine = qemu_coroutine_self(), 2863 }; 2864 2865 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 2866 bdrv_co_io_em_complete, &co); 2867 if (acb == NULL) { 2868 ret = -EIO; 2869 goto out; 2870 } else { 2871 qemu_coroutine_yield(); 2872 ret = co.ret; 2873 } 2874 } 2875 if (ret && ret != -ENOTSUP) { 2876 goto out; 2877 } 2878 2879 offset += num; 2880 bytes -= num; 2881 } 2882 ret = 0; 2883 out: 2884 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 2885 tracked_request_end(&req); 2886 bdrv_dec_in_flight(bs); 2887 return ret; 2888 } 2889 2890 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 2891 { 2892 BlockDriver *drv = bs->drv; 2893 CoroutineIOCompletion co = { 2894 .coroutine = qemu_coroutine_self(), 2895 }; 2896 BlockAIOCB *acb; 2897 2898 bdrv_inc_in_flight(bs); 2899 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 2900 co.ret = -ENOTSUP; 2901 goto out; 2902 } 2903 2904 if (drv->bdrv_co_ioctl) { 2905 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 2906 } else { 2907 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 2908 if (!acb) { 2909 co.ret = -ENOTSUP; 2910 goto out; 2911 } 2912 qemu_coroutine_yield(); 2913 } 2914 out: 2915 bdrv_dec_in_flight(bs); 2916 return co.ret; 2917 } 2918 2919 void *qemu_blockalign(BlockDriverState *bs, size_t size) 2920 { 2921 return qemu_memalign(bdrv_opt_mem_align(bs), size); 2922 } 2923 2924 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 2925 { 2926 return memset(qemu_blockalign(bs, size), 0, size); 2927 } 2928 2929 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 2930 { 2931 size_t align = bdrv_opt_mem_align(bs); 2932 2933 /* Ensure that NULL is never returned on success */ 2934 assert(align > 0); 2935 if (size == 0) { 2936 size = align; 2937 } 2938 2939 return qemu_try_memalign(align, size); 2940 } 2941 2942 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 2943 { 2944 void *mem = qemu_try_blockalign(bs, size); 2945 2946 if (mem) { 2947 memset(mem, 0, size); 2948 } 2949 2950 return mem; 2951 } 2952 2953 /* 2954 * Check if all memory in this vector is sector aligned. 
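 *
 * A typical use (an illustrative sketch) is deciding whether a vector can
 * be submitted to an O_DIRECT file descriptor as-is or must be bounced
 * through an aligned buffer:
 *
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         void *bounce = qemu_try_blockalign(bs, qiov->size);
 *         if (bounce == NULL) {
 *             return -ENOMEM;
 *         }
 *         qemu_iovec_to_buf(qiov, 0, bounce, qiov->size);
 *         /* ... submit 'bounce' instead of the original vector ... */
 *     }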
2955 */ 2956 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 2957 { 2958 int i; 2959 size_t alignment = bdrv_min_mem_align(bs); 2960 2961 for (i = 0; i < qiov->niov; i++) { 2962 if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 2963 return false; 2964 } 2965 if (qiov->iov[i].iov_len % alignment) { 2966 return false; 2967 } 2968 } 2969 2970 return true; 2971 } 2972 2973 void bdrv_add_before_write_notifier(BlockDriverState *bs, 2974 NotifierWithReturn *notifier) 2975 { 2976 notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 2977 } 2978 2979 void bdrv_io_plug(BlockDriverState *bs) 2980 { 2981 BdrvChild *child; 2982 2983 QLIST_FOREACH(child, &bs->children, next) { 2984 bdrv_io_plug(child->bs); 2985 } 2986 2987 if (qatomic_fetch_inc(&bs->io_plugged) == 0) { 2988 BlockDriver *drv = bs->drv; 2989 if (drv && drv->bdrv_io_plug) { 2990 drv->bdrv_io_plug(bs); 2991 } 2992 } 2993 } 2994 2995 void bdrv_io_unplug(BlockDriverState *bs) 2996 { 2997 BdrvChild *child; 2998 2999 assert(bs->io_plugged); 3000 if (qatomic_fetch_dec(&bs->io_plugged) == 1) { 3001 BlockDriver *drv = bs->drv; 3002 if (drv && drv->bdrv_io_unplug) { 3003 drv->bdrv_io_unplug(bs); 3004 } 3005 } 3006 3007 QLIST_FOREACH(child, &bs->children, next) { 3008 bdrv_io_unplug(child->bs); 3009 } 3010 } 3011 3012 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size) 3013 { 3014 BdrvChild *child; 3015 3016 if (bs->drv && bs->drv->bdrv_register_buf) { 3017 bs->drv->bdrv_register_buf(bs, host, size); 3018 } 3019 QLIST_FOREACH(child, &bs->children, next) { 3020 bdrv_register_buf(child->bs, host, size); 3021 } 3022 } 3023 3024 void bdrv_unregister_buf(BlockDriverState *bs, void *host) 3025 { 3026 BdrvChild *child; 3027 3028 if (bs->drv && bs->drv->bdrv_unregister_buf) { 3029 bs->drv->bdrv_unregister_buf(bs, host); 3030 } 3031 QLIST_FOREACH(child, &bs->children, next) { 3032 bdrv_unregister_buf(child->bs, host); 3033 } 3034 } 3035 3036 static int coroutine_fn bdrv_co_copy_range_internal( 3037 BdrvChild *src, uint64_t src_offset, BdrvChild *dst, 3038 uint64_t dst_offset, uint64_t bytes, 3039 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 3040 bool recurse_src) 3041 { 3042 BdrvTrackedRequest req; 3043 int ret; 3044 3045 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 3046 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 3047 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 3048 3049 if (!dst || !dst->bs) { 3050 return -ENOMEDIUM; 3051 } 3052 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes); 3053 if (ret) { 3054 return ret; 3055 } 3056 if (write_flags & BDRV_REQ_ZERO_WRITE) { 3057 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 3058 } 3059 3060 if (!src || !src->bs) { 3061 return -ENOMEDIUM; 3062 } 3063 ret = bdrv_check_byte_request(src->bs, src_offset, bytes); 3064 if (ret) { 3065 return ret; 3066 } 3067 3068 if (!src->bs->drv->bdrv_co_copy_range_from 3069 || !dst->bs->drv->bdrv_co_copy_range_to 3070 || src->bs->encrypted || dst->bs->encrypted) { 3071 return -ENOTSUP; 3072 } 3073 3074 if (recurse_src) { 3075 bdrv_inc_in_flight(src->bs); 3076 tracked_request_begin(&req, src->bs, src_offset, bytes, 3077 BDRV_TRACKED_READ); 3078 3079 /* BDRV_REQ_SERIALISING is only for write operation */ 3080 assert(!(read_flags & BDRV_REQ_SERIALISING)); 3081 bdrv_wait_serialising_requests(&req); 3082 3083 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 3084 src, src_offset, 3085 dst, dst_offset, 3086 bytes, 3087 read_flags, write_flags); 3088 3089 tracked_request_end(&req); 
3090 bdrv_dec_in_flight(src->bs); 3091 } else { 3092 bdrv_inc_in_flight(dst->bs); 3093 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 3094 BDRV_TRACKED_WRITE); 3095 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 3096 write_flags); 3097 if (!ret) { 3098 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 3099 src, src_offset, 3100 dst, dst_offset, 3101 bytes, 3102 read_flags, write_flags); 3103 } 3104 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 3105 tracked_request_end(&req); 3106 bdrv_dec_in_flight(dst->bs); 3107 } 3108 3109 return ret; 3110 } 3111 3112 /* Copy range from @src to @dst. 3113 * 3114 * See the comment of bdrv_co_copy_range for the parameter and return value 3115 * semantics. */ 3116 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset, 3117 BdrvChild *dst, uint64_t dst_offset, 3118 uint64_t bytes, 3119 BdrvRequestFlags read_flags, 3120 BdrvRequestFlags write_flags) 3121 { 3122 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3123 read_flags, write_flags); 3124 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3125 bytes, read_flags, write_flags, true); 3126 } 3127 3128 /* Copy range from @src to @dst. 3129 * 3130 * See the comment of bdrv_co_copy_range for the parameter and return value 3131 * semantics. */ 3132 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset, 3133 BdrvChild *dst, uint64_t dst_offset, 3134 uint64_t bytes, 3135 BdrvRequestFlags read_flags, 3136 BdrvRequestFlags write_flags) 3137 { 3138 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3139 read_flags, write_flags); 3140 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3141 bytes, read_flags, write_flags, false); 3142 } 3143 3144 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset, 3145 BdrvChild *dst, uint64_t dst_offset, 3146 uint64_t bytes, BdrvRequestFlags read_flags, 3147 BdrvRequestFlags write_flags) 3148 { 3149 return bdrv_co_copy_range_from(src, src_offset, 3150 dst, dst_offset, 3151 bytes, read_flags, write_flags); 3152 } 3153 3154 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3155 { 3156 BdrvChild *c; 3157 QLIST_FOREACH(c, &bs->parents, next_parent) { 3158 if (c->klass->resize) { 3159 c->klass->resize(c); 3160 } 3161 } 3162 } 3163 3164 /** 3165 * Truncate file to 'offset' bytes (needed only for file protocols) 3166 * 3167 * If 'exact' is true, the file must be resized to exactly the given 3168 * 'offset'. Otherwise, it is sufficient for the node to be at least 3169 * 'offset' bytes in length. 
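 *
 * An illustrative call that grows an image without preallocation (a
 * sketch; 'child', 'new_size' and 'errp' are assumed to come from the
 * caller):
 *
 *     ret = bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF,
 *                            0, errp);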
3170 */ 3171 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, 3172 PreallocMode prealloc, BdrvRequestFlags flags, 3173 Error **errp) 3174 { 3175 BlockDriverState *bs = child->bs; 3176 BdrvChild *filtered, *backing; 3177 BlockDriver *drv = bs->drv; 3178 BdrvTrackedRequest req; 3179 int64_t old_size, new_bytes; 3180 int ret; 3181 3182 3183 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3184 if (!drv) { 3185 error_setg(errp, "No medium inserted"); 3186 return -ENOMEDIUM; 3187 } 3188 if (offset < 0) { 3189 error_setg(errp, "Image size cannot be negative"); 3190 return -EINVAL; 3191 } 3192 3193 old_size = bdrv_getlength(bs); 3194 if (old_size < 0) { 3195 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3196 return old_size; 3197 } 3198 3199 if (offset > old_size) { 3200 new_bytes = offset - old_size; 3201 } else { 3202 new_bytes = 0; 3203 } 3204 3205 bdrv_inc_in_flight(bs); 3206 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3207 BDRV_TRACKED_TRUNCATE); 3208 3209 /* If we are growing the image and potentially using preallocation for the 3210 * new area, we need to make sure that no write requests are made to it 3211 * concurrently or they might be overwritten by preallocation. */ 3212 if (new_bytes) { 3213 bdrv_mark_request_serialising(&req, 1); 3214 } 3215 if (bs->read_only) { 3216 error_setg(errp, "Image is read-only"); 3217 ret = -EACCES; 3218 goto out; 3219 } 3220 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3221 0); 3222 if (ret < 0) { 3223 error_setg_errno(errp, -ret, 3224 "Failed to prepare request for truncation"); 3225 goto out; 3226 } 3227 3228 filtered = bdrv_filter_child(bs); 3229 backing = bdrv_cow_child(bs); 3230 3231 /* 3232 * If the image has a backing file that is large enough that it would 3233 * provide data for the new area, we cannot leave it unallocated because 3234 * then the backing file content would become visible. Instead, zero-fill 3235 * the new area. 3236 * 3237 * Note that if the image has a backing file, but was opened without the 3238 * backing file, taking care of keeping things consistent with that backing 3239 * file is the user's responsibility. 3240 */ 3241 if (new_bytes && backing) { 3242 int64_t backing_len; 3243 3244 backing_len = bdrv_getlength(backing->bs); 3245 if (backing_len < 0) { 3246 ret = backing_len; 3247 error_setg_errno(errp, -ret, "Could not get backing file size"); 3248 goto out; 3249 } 3250 3251 if (backing_len > old_size) { 3252 flags |= BDRV_REQ_ZERO_WRITE; 3253 } 3254 } 3255 3256 if (drv->bdrv_co_truncate) { 3257 if (flags & ~bs->supported_truncate_flags) { 3258 error_setg(errp, "Block driver does not support requested flags"); 3259 ret = -ENOTSUP; 3260 goto out; 3261 } 3262 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); 3263 } else if (filtered) { 3264 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); 3265 } else { 3266 error_setg(errp, "Image format driver does not support resize"); 3267 ret = -ENOTSUP; 3268 goto out; 3269 } 3270 if (ret < 0) { 3271 goto out; 3272 } 3273 3274 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3275 if (ret < 0) { 3276 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3277 } else { 3278 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3279 } 3280 /* It's possible that truncation succeeded but refresh_total_sectors 3281 * failed, but the latter doesn't affect how we should finish the request. 
3282 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */ 3283 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3284 3285 out: 3286 tracked_request_end(&req); 3287 bdrv_dec_in_flight(bs); 3288 3289 return ret; 3290 } 3291