/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->role->drained_end) {
        c->role->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->role->drained_poll) {
        return c->role->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->role->drained_begin) {
        c->role->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv) ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
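/*
 * Illustration of the merge rules above (made-up values, not from any real
 * driver): MIN_NON_ZERO() treats 0 as "unlimited", so an unlimited parent
 * merged with a limited child inherits the child's limit:
 *
 *     BlockLimits dst = { .max_transfer = 0,     .opt_transfer = 4096  };
 *     BlockLimits src = { .max_transfer = 65536, .opt_transfer = 16384 };
 *     bdrv_merge_limits(&dst, &src);
 *     // dst.max_transfer == 65536 (tightest non-zero limit wins)
 *     // dst.opt_transfer == 16384 (largest optimum wins)
 */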
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    atomic_mb_set(&data->done, true);
    if (!data->begin) {
        atomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        atomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}
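/*
 * Sketch of the driver-side callbacks invoked through bdrv_drain_invoke()
 * above (a hypothetical driver, not part of this file): drain_begin stops
 * generating new background I/O, drain_end resumes it. Requests still in
 * flight are what bdrv_drain_poll() keeps polling for.
 *
 *     static void coroutine_fn hypo_co_drain_begin(BlockDriverState *bs)
 *     {
 *         HypoDriverState *s = bs->opaque;
 *         s->background_io_paused = true;   // stop issuing new requests
 *     }
 *
 *     static void coroutine_fn hypo_co_drain_end(BlockDriverState *bs)
 *     {
 *         HypoDriverState *s = bs->opaque;
 *         s->background_io_paused = false;  // resume background work
 *     }
 */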
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0);
}
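/*
 * Typical use of the drained sections defined above (caller sketch; the
 * middle step is a placeholder for whatever must not race with I/O):
 *
 *     bdrv_drained_begin(bs);   // quiesce bs, its parents and its driver
 *     ... reconfigure the graph, complete a job, etc. ...
 *     bdrv_drained_end(bs);     // resume; polls until all drained_end
 *                               // callbacks have settled
 */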
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block drivers' internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(atomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
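/*
 * Lifecycle of a tracked request as used by the read/write paths later in
 * this file (sketch):
 *
 *     BdrvTrackedRequest req;
 *
 *     bdrv_inc_in_flight(bs);
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
 *     ... optionally mark_request_serialising(&req, align) and
 *         wait_serialising_requests(&req), then perform the actual I/O ...
 *     tracked_request_end(&req);
 *     bdrv_dec_in_flight(bs);
 */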
static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
{
    /*
     * If the request is serialising, overlap_offset and overlap_bytes are set,
     * so we can check if the request is aligned. Otherwise, don't care and
     * return false.
     */

    return req->serialising && (req->offset == req->overlap_offset) &&
           (req->bytes == req->overlap_bytes);
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
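/*
 * Worked example for bdrv_round_to_clusters(), assuming a 64 KiB cluster
 * size: offset = 70 KiB, bytes = 4 KiB yields *cluster_offset = 64 KiB and
 * *cluster_bytes = 64 KiB, i.e. the smallest cluster-aligned region that
 * covers [70 KiB, 74 KiB).
 */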
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
    aio_wait_kick();
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}
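/*
 * Example caller of the synchronous wrapper above (sketch): zero the first
 * megabyte and allow the driver to unmap it. Outside coroutine context,
 * bdrv_prwv_co() spawns a coroutine and polls until rwco.ret stops being
 * the NOT_DONE sentinel:
 *
 *     int ret = bdrv_pwrite_zeroes(child, 0, 1024 * 1024,
 *                                  BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */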
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
   -EIO         generic I/O error (may happen for all errors)
   -ENOMEDIUM   No media inserted.
   -EINVAL      Invalid offset or number of bytes
   -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_pwritev(child, offset, &qiov);
}
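/*
 * Example round trip through the byte-based convenience wrappers above
 * (sketch; child and the offsets are arbitrary):
 *
 *     uint8_t buf[512];
 *
 *     int ret = bdrv_pread(child, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         return ret;                    // -EIO, -ENOMEDIUM, -EINVAL, ...
 *     }
 *     ret = bdrv_pwrite(child, 4096, buf, sizeof(buf));
 *     // on success, both return the number of bytes transferred (512)
 */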
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
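/*
 * The CoroutineIOCompletion pattern used by the two driver dispatchers
 * above bridges callback-based AIO drivers into coroutine context (sketch):
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *
 *     acb = drv->bdrv_aio_preadv(bs, ..., bdrv_co_io_em_complete, &co);
 *     if (acb) {
 *         qemu_coroutine_yield();   // woken up by bdrv_co_io_em_complete()
 *         return co.ret;            // result delivered by the AIO callback
 *     }
 */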
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    bounce_buffer = qemu_try_blockalign(bs,
                                        MIN(MIN(max_transfer, cluster_bytes),
                                            MAX_BOUNCE_BUFFER));
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    while (cluster_bytes) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, cluster_offset,
                                MIN(cluster_bytes, max_transfer), &pnum);
        if (ret < 0) {
            /* Safe to treat errors in querying allocation as if
             * unallocated; we'll probably fail again soon on the
             * read, but at least that will set a decent errno.
             */
            pnum = MIN(cluster_bytes, max_transfer);
        }

        /* Stop at EOF if the image ends in the middle of the cluster */
        if (ret == 0 && pnum == 0) {
            assert(progress >= bytes);
            break;
        }

        assert(skip_bytes < pnum);

        if (ret <= 0) {
            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
                                pnum - skip_bytes);
        } else {
            /* Read directly into the destination */
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
            ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
                                     &local_qiov, 0);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
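/*
 * Worked example for the copy-on-read path above, assuming 64 KiB clusters
 * and no tighter max_transfer limit: a 4 KiB guest read at offset 70 KiB of
 * an unallocated cluster reads the whole cluster [64 KiB, 128 KiB) into the
 * bounce buffer, writes it back with BDRV_REQ_WRITE_UNCHANGED (or as zeroes
 * if it reads as zero), and copies just [70 KiB, 74 KiB) into the caller's
 * qiov.
 */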
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    /* BDRV_REQ_SERIALISING is only for write operation */
    assert(!(flags & BDRV_REQ_SERIALISING));

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    trace_bdrv_co_preadv(child->bs, offset, bytes, flags);

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
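/*
 * Worked example of the padding above, assuming a 512-byte request
 * alignment: a 100-byte read at offset 700 is widened to a 512-byte read at
 * offset 512; head_buf receives [512, 700), tail_buf receives [800, 1024),
 * and only [700, 800) lands in the caller's qiov.
 */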
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret < 0 && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
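/*
 * Worked example of the head/tail splitting above, assuming a 4096-byte
 * zeroing alignment and no tighter max_transfer limit: zeroing offset 5000,
 * bytes 10000 is issued as an unaligned head [5000, 8192), an aligned
 * middle [8192, 12288), and an unaligned tail [12288, 15000), so the driver
 * sees one large aligned request plus two short unaligned ones.
 */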
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;
    bool waited;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);

    if (bs->read_only) {
        return -EPERM;
    }

    /* BDRV_REQ_NO_SERIALISING is only for read operation */
    assert(!(flags & BDRV_REQ_NO_SERIALISING));
    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));

    if (flags & BDRV_REQ_SERIALISING) {
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    waited = wait_serialising_requests(req);

    assert(!waited || !req->serialising ||
           is_request_serialising_and_aligned(req));
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
                                                req);
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    atomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    uint64_t bytes_remaining = bytes;
    int max_transfer;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = (align - (offset + bytes)) & (align - 1);

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        qemu_iovec_init_buf(&local_qiov, buf, align);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
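/*
 * Worked example for the zero-write RMW above, with align = 512: zeroing
 * offset 100, bytes 200 reads the whole sector [0, 512) into the bounce
 * buffer, memsets bytes [100, 300) of it to zero, and writes the full
 * sector back with BDRV_REQ_ZERO_WRITE cleared.
 */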
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init_buf(&head_qiov, head_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_init_buf(&tail_qiov, tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
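/*
 * Minimal usage sketch (illustrative only; "child" stands for any BdrvChild
 * the caller owns): punch a zeroed region and let the driver unmap it when
 * the image was opened with BDRV_O_UNMAP:
 *
 *     static int coroutine_fn zero_region(BdrvChild *child,
 *                                         int64_t offset, int bytes)
 *     {
 *         return bdrv_co_pwrite_zeroes(child, offset, bytes,
 *                                      BDRV_REQ_MAY_UNMAP);
 *     }
 */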
/*
 * Flush ALL BDSes regardless of whether they are reachable via a
 * BlockBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

typedef struct BdrvCoBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    bool want_zero;
    int64_t offset;
    int64_t bytes;
    int64_t *pnum;
    int64_t *map;
    BlockDriverState **file;
    int ret;
    bool done;
} BdrvCoBlockStatusData;

int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
                                                bool want_zero,
                                                int64_t offset,
                                                int64_t bytes,
                                                int64_t *pnum,
                                                int64_t *map,
                                                BlockDriverState **file)
{
    assert(bs->file && bs->file->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->file->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}

int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    assert(bs->backing && bs->backing->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}
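/*
 * Sketch (illustrative, not a real driver): a filter whose data lives
 * entirely in bs->file can delegate block-status queries by wiring the
 * helper above into its BlockDriver definition:
 *
 *     static BlockDriver bdrv_example_filter = {
 *         .format_name          = "example-filter",
 *         .bdrv_co_block_status = bdrv_co_block_status_from_file,
 *         ...
 *     };
 *
 * The BDRV_BLOCK_RAW return value makes bdrv_co_block_status() recurse
 * into the returned *file, so the final answer comes from the layer that
 * actually stores the data.
 */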
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    if (!bs->drv->bdrv_co_block_status) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                        aligned_bytes, pnum, &local_map,
                                        &local_file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (want_zero) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t size2 = bdrv_getlength(bs2);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
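/*
 * Illustrative example of the clamping above (added commentary, assuming
 * request_alignment 512): a query at offset 700 for 100 bytes is widened
 * to aligned_offset 512 and aligned_bytes 512 before calling the driver.
 * If the driver reports 512 bytes, the code subtracts the 188-byte shift
 * (offset - aligned_offset) from *pnum and clamps it back to the 100 bytes
 * the caller asked for, adjusting local_map by the same shift when
 * BDRV_BLOCK_OFFSET_VALID is set.
 */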
static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                                   BlockDriverState *base,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    BlockDriverState *p;
    int ret = 0;
    bool first = true;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        if (ret < 0) {
            break;
        }
        if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
            /*
             * Reading beyond the end of the file continues to read
             * zeroes, but we can only widen the result to the
             * unallocated length we learned from an earlier
             * iteration.
             */
            *pnum = bytes;
        }
        if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
            break;
        }
        /* [offset, pnum] unallocated on this layer, which could be only
         * the first part of [offset, bytes].  */
        bytes = MIN(bytes, *pnum);
        first = false;
    }
    return ret;
}

/* Coroutine wrapper for bdrv_block_status_above() */
static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
{
    BdrvCoBlockStatusData *data = opaque;

    data->ret = bdrv_co_block_status_above(data->bs, data->base,
                                           data->want_zero,
                                           data->offset, data->bytes,
                                           data->pnum, data->map, data->file);
    data->done = true;
    aio_wait_kick();
}
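/*
 * Note (added commentary): the entry point above follows this file's usual
 * coroutine-wrapper pattern -- pack the arguments into a struct, run the
 * coroutine_fn directly when already in coroutine context or via
 * qemu_coroutine_create() + BDRV_POLL_WHILE() otherwise, and signal
 * completion with aio_wait_kick().  bdrv_flush(), bdrv_pdiscard() and
 * bdrv_truncate() below are built the same way.
 */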
/*
 * Synchronous wrapper around bdrv_co_block_status_above().
 *
 * See bdrv_co_block_status_above() for details.
 */
static int bdrv_common_block_status_above(BlockDriverState *bs,
                                          BlockDriverState *base,
                                          bool want_zero, int64_t offset,
                                          int64_t bytes, int64_t *pnum,
                                          int64_t *map,
                                          BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoBlockStatusData data = {
        .bs = bs,
        .base = base,
        .want_zero = want_zero,
        .offset = offset,
        .bytes = bytes,
        .pnum = pnum,
        .map = map,
        .file = file,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, !data.done);
    }
    return data.ret;
}

int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, true, offset, bytes,
                                          pnum, map, file);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, backing_bs(bs),
                                   offset, bytes, pnum, map, file);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
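/*
 * Usage sketch (illustrative, not part of the original file): walking an
 * image's extents the way "qemu-img map" does, advancing by the returned
 * *pnum on each iteration:
 *
 *     int64_t offset = 0, bytes = bdrv_getlength(bs);
 *     while (bytes > 0) {
 *         int64_t pnum, map;
 *         BlockDriverState *file;
 *         int ret = bdrv_block_status(bs, offset, bytes,
 *                                     &pnum, &map, &file);
 *         if (ret < 0) {
 *             break;
 *         }
 *         ...inspect the BDRV_BLOCK_* flags for [offset, offset + pnum)...
 *         offset += pnum;
 *         bytes -= pnum;
 *     }
 */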
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return 1 if (a prefix of) the given range is allocated in any image
 * between BASE and TOP (BASE is only included if include_base is set).
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    BlockDriverState *intermediate;
    int ret;
    int64_t n = bytes;

    assert(base || !include_base);

    intermediate = top;
    while (include_base || intermediate != base) {
        int64_t pnum_inter;
        int64_t size_inter;

        assert(intermediate);
        ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        size_inter = bdrv_getlength(intermediate);
        if (size_inter < 0) {
            return size_inter;
        }
        if (n > pnum_inter &&
            (intermediate == top || offset + pnum_inter < size_inter)) {
            n = pnum_inter;
        }

        if (intermediate == base) {
            break;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}

typedef struct BdrvVmstateCo {
    BlockDriverState *bs;
    QEMUIOVector *qiov;
    int64_t pos;
    bool is_read;
    int ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;
    int ret = -ENOTSUP;

    bdrv_inc_in_flight(bs);

    if (!drv) {
        ret = -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        if (is_read) {
            ret = drv->bdrv_load_vmstate(bs, qiov, pos);
        } else {
            ret = drv->bdrv_save_vmstate(bs, qiov, pos);
        }
    } else if (bs->file) {
        ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    bdrv_dec_in_flight(bs);
    return ret;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
    aio_wait_kick();
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs = bs,
            .qiov = qiov,
            .pos = pos,
            .is_read = is_read,
            .ret = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
        return data.ret;
    }
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
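/*
 * Usage sketch (illustrative): the migration code persists the VM state
 * blob through the helpers above.  A caller could store a buffer at
 * vmstate position 0 like this:
 *
 *     uint8_t buf[512] = { 0 };
 *     int ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
 *
 * Note that on success these wrappers return the number of bytes
 * transferred ("size"), not 0.
 */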
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel.  The caller is not blocked if the acb
 * implements cancel_async, otherwise we do nothing and let the request
 * normally complete.  In either case the completion callback must be
 * called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
    aio_wait_kick();
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = atomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush.  Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk.  Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}

typedef struct DiscardCo {
    BdrvChild *child;
    int64_t offset;
    int64_t bytes;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
    aio_wait_kick();
}
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;
    BlockDriverState *bs = child->bs;

    if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
        return -EIO;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries.  */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
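/*
 * Worked example for the head/tail handling above (added commentary,
 * assuming pdiscard_alignment 64 KiB and request_alignment 512): a discard
 * starting at offset 1000 first issues a small request up to the next
 * 512-byte boundary, then one up to the 64 KiB boundary, then whole
 * aligned chunks capped at max_pdiscard, and finally the misaligned
 * remainder.  Drivers that reject the unaligned pieces with -ENOTSUP just
 * cause those pieces to be skipped, which is acceptable for an advisory
 * operation.
 */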
int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
{
    Coroutine *co;
    DiscardCo rwco = {
        .child = child,
        .offset = offset,
        .bytes = bytes,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
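/*
 * Usage sketch (illustrative): bounce buffers for O_DIRECT-style backends
 * are allocated with the helpers above so they satisfy the device's memory
 * alignment limits:
 *
 *     uint8_t *bounce = qemu_try_blockalign(bs, len);
 *     if (!bounce) {
 *         return -ENOMEM;
 *     }
 *     ...use bounce...
 *     qemu_vfree(bounce);
 *
 * qemu_blockalign() aborts on allocation failure, so the _try_ variant is
 * preferable when the size is guest-controllable.
 */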
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}
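/*
 * Illustrative note: plug/unplug calls are batching hints and must be
 * paired.  A caller submitting a burst of requests can wrap them as
 *
 *     bdrv_io_plug(bs);
 *     ...submit several asynchronous requests...
 *     bdrv_io_unplug(bs);
 *
 * so that drivers with a batched submission interface (e.g. linux-aio)
 * can flush the whole queue with a single syscall on the final unplug.
 */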
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_register_buf) {
        bs->drv->bdrv_register_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_register_buf(child->bs, host, size);
    }
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host);
    }
}

static int coroutine_fn bdrv_co_copy_range_internal(
        BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
        uint64_t dst_offset, uint64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));

    if (!dst || !dst->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
            wait_serialising_requests(&req);
        }

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         uint64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       uint64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
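/*
 * Usage sketch (illustrative): offloaded copy between two children the
 * caller owns, falling back to an ordinary read/write pair on -ENOTSUP:
 *
 *     ret = bdrv_co_copy_range(src_child, src_off, dst_child, dst_off,
 *                              len, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         ...bounce-buffer read from src_child, then write to dst_child...
 *     }
 *
 * Both drivers must implement bdrv_co_copy_range_from/to; otherwise the
 * call fails with -ENOTSUP and the caller is expected to emulate the copy.
 */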
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->resize) {
            c->role->resize(c);
        }
    }
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
                                  PreallocMode prealloc, Error **errp)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        mark_request_serialising(&req, 1);
    }
    if (bs->read_only) {
        error_setg(errp, "Image is read-only");
        ret = -EACCES;
        goto out;
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    if (!drv->bdrv_co_truncate) {
        if (bs->file && drv->is_filter) {
            ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
            goto out;
        }
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }

    ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
    if (ret < 0) {
        goto out;
    }
    ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /* It's possible that truncation succeeded but refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

typedef struct TruncateCo {
    BdrvChild *child;
    int64_t offset;
    PreallocMode prealloc;
    Error **errp;
    int ret;
} TruncateCo;

static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
{
    TruncateCo *tco = opaque;
    tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
                                tco->errp);
    aio_wait_kick();
}

int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
                  Error **errp)
{
    Coroutine *co;
    TruncateCo tco = {
        .child = child,
        .offset = offset,
        .prealloc = prealloc,
        .errp = errp,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_truncate_co_entry(&tco);
    } else {
        co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
    }

    return tco.ret;
}
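/*
 * Usage sketch (illustrative): growing an image without preallocation from
 * non-coroutine context, with the usual Error propagation pattern:
 *
 *     Error *local_err = NULL;
 *     if (bdrv_truncate(child, new_size, PREALLOC_MODE_OFF,
 *                       &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */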