1 /* 2 * Block layer I/O functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "trace.h" 27 #include "sysemu/block-backend.h" 28 #include "block/aio-wait.h" 29 #include "block/blockjob.h" 30 #include "block/blockjob_int.h" 31 #include "block/block_int.h" 32 #include "qemu/cutils.h" 33 #include "qapi/error.h" 34 #include "qemu/error-report.h" 35 36 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 37 38 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */ 39 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) 40 41 static void bdrv_parent_cb_resize(BlockDriverState *bs); 42 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 43 int64_t offset, int bytes, BdrvRequestFlags flags); 44 45 void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore, 46 bool ignore_bds_parents) 47 { 48 BdrvChild *c, *next; 49 50 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 51 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { 52 continue; 53 } 54 bdrv_parent_drained_begin_single(c, false); 55 } 56 } 57 58 void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, 59 bool ignore_bds_parents) 60 { 61 BdrvChild *c, *next; 62 63 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 64 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { 65 continue; 66 } 67 if (c->role->drained_end) { 68 c->role->drained_end(c); 69 } 70 } 71 } 72 73 static bool bdrv_parent_drained_poll_single(BdrvChild *c) 74 { 75 if (c->role->drained_poll) { 76 return c->role->drained_poll(c); 77 } 78 return false; 79 } 80 81 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, 82 bool ignore_bds_parents) 83 { 84 BdrvChild *c, *next; 85 bool busy = false; 86 87 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { 88 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { 89 continue; 90 } 91 busy |= bdrv_parent_drained_poll_single(c); 92 } 93 94 return busy; 95 } 96 97 void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll) 98 { 99 if (c->role->drained_begin) { 100 c->role->drained_begin(c); 101 } 102 if (poll) { 103 BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c)); 104 } 105 } 106 107 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) 108 { 109 dst->opt_transfer = MAX(dst->opt_transfer, 
src->opt_transfer); 110 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer); 111 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment, 112 src->opt_mem_alignment); 113 dst->min_mem_alignment = MAX(dst->min_mem_alignment, 114 src->min_mem_alignment); 115 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); 116 } 117 118 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) 119 { 120 BlockDriver *drv = bs->drv; 121 Error *local_err = NULL; 122 123 memset(&bs->bl, 0, sizeof(bs->bl)); 124 125 if (!drv) { 126 return; 127 } 128 129 /* Default alignment based on whether driver has byte interface */ 130 bs->bl.request_alignment = (drv->bdrv_co_preadv || 131 drv->bdrv_aio_preadv) ? 1 : 512; 132 133 /* Take some limits from the children as a default */ 134 if (bs->file) { 135 bdrv_refresh_limits(bs->file->bs, &local_err); 136 if (local_err) { 137 error_propagate(errp, local_err); 138 return; 139 } 140 bdrv_merge_limits(&bs->bl, &bs->file->bs->bl); 141 } else { 142 bs->bl.min_mem_alignment = 512; 143 bs->bl.opt_mem_alignment = getpagesize(); 144 145 /* Safe default since most protocols use readv()/writev()/etc */ 146 bs->bl.max_iov = IOV_MAX; 147 } 148 149 if (bs->backing) { 150 bdrv_refresh_limits(bs->backing->bs, &local_err); 151 if (local_err) { 152 error_propagate(errp, local_err); 153 return; 154 } 155 bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl); 156 } 157 158 /* Then let the driver override it */ 159 if (drv->bdrv_refresh_limits) { 160 drv->bdrv_refresh_limits(bs, errp); 161 } 162 } 163 164 /** 165 * The copy-on-read flag is actually a reference count so multiple users may 166 * use the feature without worrying about clobbering its previous state. 167 * Copy-on-read stays enabled until all users have called to disable it. 168 */ 169 void bdrv_enable_copy_on_read(BlockDriverState *bs) 170 { 171 atomic_inc(&bs->copy_on_read); 172 } 173 174 void bdrv_disable_copy_on_read(BlockDriverState *bs) 175 { 176 int old = atomic_fetch_dec(&bs->copy_on_read); 177 assert(old >= 1); 178 } 179 180 typedef struct { 181 Coroutine *co; 182 BlockDriverState *bs; 183 bool done; 184 bool begin; 185 bool recursive; 186 bool poll; 187 BdrvChild *parent; 188 bool ignore_bds_parents; 189 } BdrvCoDrainData; 190 191 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque) 192 { 193 BdrvCoDrainData *data = opaque; 194 BlockDriverState *bs = data->bs; 195 196 if (data->begin) { 197 bs->drv->bdrv_co_drain_begin(bs); 198 } else { 199 bs->drv->bdrv_co_drain_end(bs); 200 } 201 202 /* Set data->done before reading bs->wakeup. */ 203 atomic_mb_set(&data->done, true); 204 bdrv_dec_in_flight(bs); 205 206 if (data->begin) { 207 g_free(data); 208 } 209 } 210 211 /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */ 212 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin) 213 { 214 BdrvCoDrainData *data; 215 216 if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) || 217 (!begin && !bs->drv->bdrv_co_drain_end)) { 218 return; 219 } 220 221 data = g_new(BdrvCoDrainData, 1); 222 *data = (BdrvCoDrainData) { 223 .bs = bs, 224 .done = false, 225 .begin = begin 226 }; 227 228 /* Make sure the driver callback completes during the polling phase for 229 * drain_begin. 
*/ 230 bdrv_inc_in_flight(bs); 231 data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data); 232 aio_co_schedule(bdrv_get_aio_context(bs), data->co); 233 234 if (!begin) { 235 BDRV_POLL_WHILE(bs, !data->done); 236 g_free(data); 237 } 238 } 239 240 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ 241 bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, 242 BdrvChild *ignore_parent, bool ignore_bds_parents) 243 { 244 BdrvChild *child, *next; 245 246 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { 247 return true; 248 } 249 250 if (atomic_read(&bs->in_flight)) { 251 return true; 252 } 253 254 if (recursive) { 255 assert(!ignore_bds_parents); 256 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 257 if (bdrv_drain_poll(child->bs, recursive, child, false)) { 258 return true; 259 } 260 } 261 } 262 263 return false; 264 } 265 266 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive, 267 BdrvChild *ignore_parent) 268 { 269 return bdrv_drain_poll(bs, recursive, ignore_parent, false); 270 } 271 272 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, 273 BdrvChild *parent, bool ignore_bds_parents, 274 bool poll); 275 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, 276 BdrvChild *parent, bool ignore_bds_parents); 277 278 static void bdrv_co_drain_bh_cb(void *opaque) 279 { 280 BdrvCoDrainData *data = opaque; 281 Coroutine *co = data->co; 282 BlockDriverState *bs = data->bs; 283 284 if (bs) { 285 AioContext *ctx = bdrv_get_aio_context(bs); 286 AioContext *co_ctx = qemu_coroutine_get_aio_context(co); 287 288 /* 289 * When the coroutine yielded, the lock for its home context was 290 * released, so we need to re-acquire it here. If it explicitly 291 * acquired a different context, the lock is still held and we don't 292 * want to lock it a second time (or AIO_WAIT_WHILE() would hang). 293 */ 294 if (ctx == co_ctx) { 295 aio_context_acquire(ctx); 296 } 297 bdrv_dec_in_flight(bs); 298 if (data->begin) { 299 bdrv_do_drained_begin(bs, data->recursive, data->parent, 300 data->ignore_bds_parents, data->poll); 301 } else { 302 bdrv_do_drained_end(bs, data->recursive, data->parent, 303 data->ignore_bds_parents); 304 } 305 if (ctx == co_ctx) { 306 aio_context_release(ctx); 307 } 308 } else { 309 assert(data->begin); 310 bdrv_drain_all_begin(); 311 } 312 313 data->done = true; 314 aio_co_wake(co); 315 } 316 317 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, 318 bool begin, bool recursive, 319 BdrvChild *parent, 320 bool ignore_bds_parents, 321 bool poll) 322 { 323 BdrvCoDrainData data; 324 325 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and 326 * other coroutines run if they were queued by aio_co_enter(). */ 327 328 assert(qemu_in_coroutine()); 329 data = (BdrvCoDrainData) { 330 .co = qemu_coroutine_self(), 331 .bs = bs, 332 .done = false, 333 .begin = begin, 334 .recursive = recursive, 335 .parent = parent, 336 .ignore_bds_parents = ignore_bds_parents, 337 .poll = poll, 338 }; 339 if (bs) { 340 bdrv_inc_in_flight(bs); 341 } 342 aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), 343 bdrv_co_drain_bh_cb, &data); 344 345 qemu_coroutine_yield(); 346 /* If we are resumed from some other event (such as an aio completion or a 347 * timer callback), it is a bug in the caller that should be fixed. 
*/ 348 assert(data.done); 349 } 350 351 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, 352 BdrvChild *parent, bool ignore_bds_parents) 353 { 354 assert(!qemu_in_coroutine()); 355 356 /* Stop things in parent-to-child order */ 357 if (atomic_fetch_inc(&bs->quiesce_counter) == 0) { 358 aio_disable_external(bdrv_get_aio_context(bs)); 359 } 360 361 bdrv_parent_drained_begin(bs, parent, ignore_bds_parents); 362 bdrv_drain_invoke(bs, true); 363 } 364 365 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, 366 BdrvChild *parent, bool ignore_bds_parents, 367 bool poll) 368 { 369 BdrvChild *child, *next; 370 371 if (qemu_in_coroutine()) { 372 bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents, 373 poll); 374 return; 375 } 376 377 bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents); 378 379 if (recursive) { 380 assert(!ignore_bds_parents); 381 bs->recursive_quiesce_counter++; 382 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 383 bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents, 384 false); 385 } 386 } 387 388 /* 389 * Wait for drained requests to finish. 390 * 391 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The 392 * call is needed so things in this AioContext can make progress even 393 * though we don't return to the main AioContext loop - this automatically 394 * includes other nodes in the same AioContext and therefore all child 395 * nodes. 396 */ 397 if (poll) { 398 assert(!ignore_bds_parents); 399 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent)); 400 } 401 } 402 403 void bdrv_drained_begin(BlockDriverState *bs) 404 { 405 bdrv_do_drained_begin(bs, false, NULL, false, true); 406 } 407 408 void bdrv_subtree_drained_begin(BlockDriverState *bs) 409 { 410 bdrv_do_drained_begin(bs, true, NULL, false, true); 411 } 412 413 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, 414 BdrvChild *parent, bool ignore_bds_parents) 415 { 416 BdrvChild *child, *next; 417 int old_quiesce_counter; 418 419 if (qemu_in_coroutine()) { 420 bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents, 421 false); 422 return; 423 } 424 assert(bs->quiesce_counter > 0); 425 426 /* Re-enable things in child-to-parent order */ 427 bdrv_drain_invoke(bs, false); 428 bdrv_parent_drained_end(bs, parent, ignore_bds_parents); 429 430 old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter); 431 if (old_quiesce_counter == 1) { 432 aio_enable_external(bdrv_get_aio_context(bs)); 433 } 434 435 if (recursive) { 436 assert(!ignore_bds_parents); 437 bs->recursive_quiesce_counter--; 438 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { 439 bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents); 440 } 441 } 442 } 443 444 void bdrv_drained_end(BlockDriverState *bs) 445 { 446 bdrv_do_drained_end(bs, false, NULL, false); 447 } 448 449 void bdrv_subtree_drained_end(BlockDriverState *bs) 450 { 451 bdrv_do_drained_end(bs, true, NULL, false); 452 } 453 454 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) 455 { 456 int i; 457 458 for (i = 0; i < new_parent->recursive_quiesce_counter; i++) { 459 bdrv_do_drained_begin(child->bs, true, child, false, true); 460 } 461 } 462 463 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) 464 { 465 int i; 466 467 for (i = 0; i < old_parent->recursive_quiesce_counter; i++) { 468 bdrv_do_drained_end(child->bs, true, child, false); 469 } 470 } 471 472 /* 473 * Wait for pending 
requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(atomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk; use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
592 */ 593 static void tracked_request_end(BdrvTrackedRequest *req) 594 { 595 if (req->serialising) { 596 atomic_dec(&req->bs->serialising_in_flight); 597 } 598 599 qemu_co_mutex_lock(&req->bs->reqs_lock); 600 QLIST_REMOVE(req, list); 601 qemu_co_queue_restart_all(&req->wait_queue); 602 qemu_co_mutex_unlock(&req->bs->reqs_lock); 603 } 604 605 /** 606 * Add an active request to the tracked requests list 607 */ 608 static void tracked_request_begin(BdrvTrackedRequest *req, 609 BlockDriverState *bs, 610 int64_t offset, 611 uint64_t bytes, 612 enum BdrvTrackedRequestType type) 613 { 614 assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes); 615 616 *req = (BdrvTrackedRequest){ 617 .bs = bs, 618 .offset = offset, 619 .bytes = bytes, 620 .type = type, 621 .co = qemu_coroutine_self(), 622 .serialising = false, 623 .overlap_offset = offset, 624 .overlap_bytes = bytes, 625 }; 626 627 qemu_co_queue_init(&req->wait_queue); 628 629 qemu_co_mutex_lock(&bs->reqs_lock); 630 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 631 qemu_co_mutex_unlock(&bs->reqs_lock); 632 } 633 634 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) 635 { 636 int64_t overlap_offset = req->offset & ~(align - 1); 637 uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) 638 - overlap_offset; 639 640 if (!req->serialising) { 641 atomic_inc(&req->bs->serialising_in_flight); 642 req->serialising = true; 643 } 644 645 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 646 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 647 } 648 649 static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req) 650 { 651 /* 652 * If the request is serialising, overlap_offset and overlap_bytes are set, 653 * so we can check if the request is aligned. Otherwise, don't care and 654 * return false. 
655 */ 656 657 return req->serialising && (req->offset == req->overlap_offset) && 658 (req->bytes == req->overlap_bytes); 659 } 660 661 /** 662 * Round a region to cluster boundaries 663 */ 664 void bdrv_round_to_clusters(BlockDriverState *bs, 665 int64_t offset, int64_t bytes, 666 int64_t *cluster_offset, 667 int64_t *cluster_bytes) 668 { 669 BlockDriverInfo bdi; 670 671 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 672 *cluster_offset = offset; 673 *cluster_bytes = bytes; 674 } else { 675 int64_t c = bdi.cluster_size; 676 *cluster_offset = QEMU_ALIGN_DOWN(offset, c); 677 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); 678 } 679 } 680 681 static int bdrv_get_cluster_size(BlockDriverState *bs) 682 { 683 BlockDriverInfo bdi; 684 int ret; 685 686 ret = bdrv_get_info(bs, &bdi); 687 if (ret < 0 || bdi.cluster_size == 0) { 688 return bs->bl.request_alignment; 689 } else { 690 return bdi.cluster_size; 691 } 692 } 693 694 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 695 int64_t offset, uint64_t bytes) 696 { 697 /* aaaa bbbb */ 698 if (offset >= req->overlap_offset + req->overlap_bytes) { 699 return false; 700 } 701 /* bbbb aaaa */ 702 if (req->overlap_offset >= offset + bytes) { 703 return false; 704 } 705 return true; 706 } 707 708 void bdrv_inc_in_flight(BlockDriverState *bs) 709 { 710 atomic_inc(&bs->in_flight); 711 } 712 713 void bdrv_wakeup(BlockDriverState *bs) 714 { 715 aio_wait_kick(); 716 } 717 718 void bdrv_dec_in_flight(BlockDriverState *bs) 719 { 720 atomic_dec(&bs->in_flight); 721 bdrv_wakeup(bs); 722 } 723 724 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) 725 { 726 BlockDriverState *bs = self->bs; 727 BdrvTrackedRequest *req; 728 bool retry; 729 bool waited = false; 730 731 if (!atomic_read(&bs->serialising_in_flight)) { 732 return false; 733 } 734 735 do { 736 retry = false; 737 qemu_co_mutex_lock(&bs->reqs_lock); 738 QLIST_FOREACH(req, &bs->tracked_requests, list) { 739 if (req == self || (!req->serialising && !self->serialising)) { 740 continue; 741 } 742 if (tracked_request_overlaps(req, self->overlap_offset, 743 self->overlap_bytes)) 744 { 745 /* Hitting this means there was a reentrant request, for 746 * example, a block driver issuing nested requests. This must 747 * never happen since it means deadlock. 748 */ 749 assert(qemu_coroutine_self() != req->co); 750 751 /* If the request is already (indirectly) waiting for us, or 752 * will wait for us as soon as it wakes up, then just go on 753 * (instead of producing a deadlock in the former case). 
*/ 754 if (!req->waiting_for) { 755 self->waiting_for = req; 756 qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock); 757 self->waiting_for = NULL; 758 retry = true; 759 waited = true; 760 break; 761 } 762 } 763 } 764 qemu_co_mutex_unlock(&bs->reqs_lock); 765 } while (retry); 766 767 return waited; 768 } 769 770 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, 771 size_t size) 772 { 773 if (size > BDRV_REQUEST_MAX_BYTES) { 774 return -EIO; 775 } 776 777 if (!bdrv_is_inserted(bs)) { 778 return -ENOMEDIUM; 779 } 780 781 if (offset < 0) { 782 return -EIO; 783 } 784 785 return 0; 786 } 787 788 typedef struct RwCo { 789 BdrvChild *child; 790 int64_t offset; 791 QEMUIOVector *qiov; 792 bool is_write; 793 int ret; 794 BdrvRequestFlags flags; 795 } RwCo; 796 797 static void coroutine_fn bdrv_rw_co_entry(void *opaque) 798 { 799 RwCo *rwco = opaque; 800 801 if (!rwco->is_write) { 802 rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset, 803 rwco->qiov->size, rwco->qiov, 804 rwco->flags); 805 } else { 806 rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset, 807 rwco->qiov->size, rwco->qiov, 808 rwco->flags); 809 } 810 aio_wait_kick(); 811 } 812 813 /* 814 * Process a vectored synchronous request using coroutines 815 */ 816 static int bdrv_prwv_co(BdrvChild *child, int64_t offset, 817 QEMUIOVector *qiov, bool is_write, 818 BdrvRequestFlags flags) 819 { 820 Coroutine *co; 821 RwCo rwco = { 822 .child = child, 823 .offset = offset, 824 .qiov = qiov, 825 .is_write = is_write, 826 .ret = NOT_DONE, 827 .flags = flags, 828 }; 829 830 if (qemu_in_coroutine()) { 831 /* Fast-path if already in coroutine context */ 832 bdrv_rw_co_entry(&rwco); 833 } else { 834 co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco); 835 bdrv_coroutine_enter(child->bs, co); 836 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE); 837 } 838 return rwco.ret; 839 } 840 841 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, 842 int bytes, BdrvRequestFlags flags) 843 { 844 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes); 845 846 return bdrv_prwv_co(child, offset, &qiov, true, 847 BDRV_REQ_ZERO_WRITE | flags); 848 } 849 850 /* 851 * Completely zero out a block device with the help of bdrv_pwrite_zeroes. 852 * The operation is sped up by checking the block status and only writing 853 * zeroes to the device if they currently do not return zeroes. Optional 854 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP, 855 * BDRV_REQ_FUA). 856 * 857 * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
 */
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
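 *
 * Illustrative use only (header_buf is a hypothetical 512-byte metadata
 * buffer, not taken from a real caller):
 *
 *     if (bdrv_pwrite_sync(child, 0, header_buf, 512) < 0) {
 *         ... handle the error before issuing dependent writes ...
 *     }
 *
 * The bdrv_flush() issued after the write is what provides the barrier
 * semantics described above.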
949 */ 950 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset, 951 const void *buf, int count) 952 { 953 int ret; 954 955 ret = bdrv_pwrite(child, offset, buf, count); 956 if (ret < 0) { 957 return ret; 958 } 959 960 ret = bdrv_flush(child->bs); 961 if (ret < 0) { 962 return ret; 963 } 964 965 return 0; 966 } 967 968 typedef struct CoroutineIOCompletion { 969 Coroutine *coroutine; 970 int ret; 971 } CoroutineIOCompletion; 972 973 static void bdrv_co_io_em_complete(void *opaque, int ret) 974 { 975 CoroutineIOCompletion *co = opaque; 976 977 co->ret = ret; 978 aio_co_wake(co->coroutine); 979 } 980 981 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs, 982 uint64_t offset, uint64_t bytes, 983 QEMUIOVector *qiov, int flags) 984 { 985 BlockDriver *drv = bs->drv; 986 int64_t sector_num; 987 unsigned int nb_sectors; 988 989 assert(!(flags & ~BDRV_REQ_MASK)); 990 assert(!(flags & BDRV_REQ_NO_FALLBACK)); 991 992 if (!drv) { 993 return -ENOMEDIUM; 994 } 995 996 if (drv->bdrv_co_preadv) { 997 return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); 998 } 999 1000 if (drv->bdrv_aio_preadv) { 1001 BlockAIOCB *acb; 1002 CoroutineIOCompletion co = { 1003 .coroutine = qemu_coroutine_self(), 1004 }; 1005 1006 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, 1007 bdrv_co_io_em_complete, &co); 1008 if (acb == NULL) { 1009 return -EIO; 1010 } else { 1011 qemu_coroutine_yield(); 1012 return co.ret; 1013 } 1014 } 1015 1016 sector_num = offset >> BDRV_SECTOR_BITS; 1017 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1018 1019 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1020 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1021 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1022 assert(drv->bdrv_co_readv); 1023 1024 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 1025 } 1026 1027 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, 1028 uint64_t offset, uint64_t bytes, 1029 QEMUIOVector *qiov, int flags) 1030 { 1031 BlockDriver *drv = bs->drv; 1032 int64_t sector_num; 1033 unsigned int nb_sectors; 1034 int ret; 1035 1036 assert(!(flags & ~BDRV_REQ_MASK)); 1037 assert(!(flags & BDRV_REQ_NO_FALLBACK)); 1038 1039 if (!drv) { 1040 return -ENOMEDIUM; 1041 } 1042 1043 if (drv->bdrv_co_pwritev) { 1044 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, 1045 flags & bs->supported_write_flags); 1046 flags &= ~bs->supported_write_flags; 1047 goto emulate_flags; 1048 } 1049 1050 if (drv->bdrv_aio_pwritev) { 1051 BlockAIOCB *acb; 1052 CoroutineIOCompletion co = { 1053 .coroutine = qemu_coroutine_self(), 1054 }; 1055 1056 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, 1057 flags & bs->supported_write_flags, 1058 bdrv_co_io_em_complete, &co); 1059 flags &= ~bs->supported_write_flags; 1060 if (acb == NULL) { 1061 ret = -EIO; 1062 } else { 1063 qemu_coroutine_yield(); 1064 ret = co.ret; 1065 } 1066 goto emulate_flags; 1067 } 1068 1069 sector_num = offset >> BDRV_SECTOR_BITS; 1070 nb_sectors = bytes >> BDRV_SECTOR_BITS; 1071 1072 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1073 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1074 assert(bytes <= BDRV_REQUEST_MAX_BYTES); 1075 1076 assert(drv->bdrv_co_writev); 1077 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, 1078 flags & bs->supported_write_flags); 1079 flags &= ~bs->supported_write_flags; 1080 1081 emulate_flags: 1082 if (ret == 0 && (flags & BDRV_REQ_FUA)) { 1083 ret = bdrv_co_flush(bs); 1084 } 1085 1086 return ret; 1087 } 1088 1089 static int coroutine_fn 1090 bdrv_driver_pwritev_compressed(BlockDriverState 
*bs, uint64_t offset, 1091 uint64_t bytes, QEMUIOVector *qiov) 1092 { 1093 BlockDriver *drv = bs->drv; 1094 1095 if (!drv) { 1096 return -ENOMEDIUM; 1097 } 1098 1099 if (!drv->bdrv_co_pwritev_compressed) { 1100 return -ENOTSUP; 1101 } 1102 1103 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); 1104 } 1105 1106 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child, 1107 int64_t offset, unsigned int bytes, QEMUIOVector *qiov) 1108 { 1109 BlockDriverState *bs = child->bs; 1110 1111 /* Perform I/O through a temporary buffer so that users who scribble over 1112 * their read buffer while the operation is in progress do not end up 1113 * modifying the image file. This is critical for zero-copy guest I/O 1114 * where anything might happen inside guest memory. 1115 */ 1116 void *bounce_buffer; 1117 1118 BlockDriver *drv = bs->drv; 1119 QEMUIOVector local_qiov; 1120 int64_t cluster_offset; 1121 int64_t cluster_bytes; 1122 size_t skip_bytes; 1123 int ret; 1124 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, 1125 BDRV_REQUEST_MAX_BYTES); 1126 unsigned int progress = 0; 1127 1128 if (!drv) { 1129 return -ENOMEDIUM; 1130 } 1131 1132 /* FIXME We cannot require callers to have write permissions when all they 1133 * are doing is a read request. If we did things right, write permissions 1134 * would be obtained anyway, but internally by the copy-on-read code. As 1135 * long as it is implemented here rather than in a separate filter driver, 1136 * the copy-on-read code doesn't have its own BdrvChild, however, for which 1137 * it could request permissions. Therefore we have to bypass the permission 1138 * system for the moment. */ 1139 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); 1140 1141 /* Cover entire cluster so no additional backing file I/O is required when 1142 * allocating cluster in the image file. Note that this value may exceed 1143 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which 1144 * is one reason we loop rather than doing it all at once. 1145 */ 1146 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); 1147 skip_bytes = offset - cluster_offset; 1148 1149 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, 1150 cluster_offset, cluster_bytes); 1151 1152 bounce_buffer = qemu_try_blockalign(bs, 1153 MIN(MIN(max_transfer, cluster_bytes), 1154 MAX_BOUNCE_BUFFER)); 1155 if (bounce_buffer == NULL) { 1156 ret = -ENOMEM; 1157 goto err; 1158 } 1159 1160 while (cluster_bytes) { 1161 int64_t pnum; 1162 1163 ret = bdrv_is_allocated(bs, cluster_offset, 1164 MIN(cluster_bytes, max_transfer), &pnum); 1165 if (ret < 0) { 1166 /* Safe to treat errors in querying allocation as if 1167 * unallocated; we'll probably fail again soon on the 1168 * read, but at least that will set a decent errno. 
1169 */ 1170 pnum = MIN(cluster_bytes, max_transfer); 1171 } 1172 1173 /* Stop at EOF if the image ends in the middle of the cluster */ 1174 if (ret == 0 && pnum == 0) { 1175 assert(progress >= bytes); 1176 break; 1177 } 1178 1179 assert(skip_bytes < pnum); 1180 1181 if (ret <= 0) { 1182 /* Must copy-on-read; use the bounce buffer */ 1183 pnum = MIN(pnum, MAX_BOUNCE_BUFFER); 1184 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum); 1185 1186 ret = bdrv_driver_preadv(bs, cluster_offset, pnum, 1187 &local_qiov, 0); 1188 if (ret < 0) { 1189 goto err; 1190 } 1191 1192 bdrv_debug_event(bs, BLKDBG_COR_WRITE); 1193 if (drv->bdrv_co_pwrite_zeroes && 1194 buffer_is_zero(bounce_buffer, pnum)) { 1195 /* FIXME: Should we (perhaps conditionally) be setting 1196 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy 1197 * that still correctly reads as zero? */ 1198 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 1199 BDRV_REQ_WRITE_UNCHANGED); 1200 } else { 1201 /* This does not change the data on the disk, it is not 1202 * necessary to flush even in cache=writethrough mode. 1203 */ 1204 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum, 1205 &local_qiov, 1206 BDRV_REQ_WRITE_UNCHANGED); 1207 } 1208 1209 if (ret < 0) { 1210 /* It might be okay to ignore write errors for guest 1211 * requests. If this is a deliberate copy-on-read 1212 * then we don't want to ignore the error. Simply 1213 * report it in all cases. 1214 */ 1215 goto err; 1216 } 1217 1218 qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes, 1219 pnum - skip_bytes); 1220 } else { 1221 /* Read directly into the destination */ 1222 qemu_iovec_init(&local_qiov, qiov->niov); 1223 qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes); 1224 ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size, 1225 &local_qiov, 0); 1226 qemu_iovec_destroy(&local_qiov); 1227 if (ret < 0) { 1228 goto err; 1229 } 1230 } 1231 1232 cluster_offset += pnum; 1233 cluster_bytes -= pnum; 1234 progress += pnum - skip_bytes; 1235 skip_bytes = 0; 1236 } 1237 ret = 0; 1238 1239 err: 1240 qemu_vfree(bounce_buffer); 1241 return ret; 1242 } 1243 1244 /* 1245 * Forwards an already correctly aligned request to the BlockDriver. This 1246 * handles copy on read, zeroing after EOF, and fragmentation of large 1247 * reads; any other features must be implemented by the caller. 1248 */ 1249 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, 1250 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1251 int64_t align, QEMUIOVector *qiov, int flags) 1252 { 1253 BlockDriverState *bs = child->bs; 1254 int64_t total_bytes, max_bytes; 1255 int ret = 0; 1256 uint64_t bytes_remaining = bytes; 1257 int max_transfer; 1258 1259 assert(is_power_of_2(align)); 1260 assert((offset & (align - 1)) == 0); 1261 assert((bytes & (align - 1)) == 0); 1262 assert(!qiov || bytes == qiov->size); 1263 assert((bs->open_flags & BDRV_O_NO_IO) == 0); 1264 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1265 align); 1266 1267 /* TODO: We would need a per-BDS .supported_read_flags and 1268 * potential fallback support, if we ever implement any read flags 1269 * to pass through to drivers. For now, there aren't any 1270 * passthrough flags. */ 1271 assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ))); 1272 1273 /* Handle Copy on Read and associated serialisation */ 1274 if (flags & BDRV_REQ_COPY_ON_READ) { 1275 /* If we touch the same cluster it counts as an overlap. 
This 1276 * guarantees that allocating writes will be serialized and not race 1277 * with each other for the same cluster. For example, in copy-on-read 1278 * it ensures that the CoR read and write operations are atomic and 1279 * guest writes cannot interleave between them. */ 1280 mark_request_serialising(req, bdrv_get_cluster_size(bs)); 1281 } 1282 1283 /* BDRV_REQ_SERIALISING is only for write operation */ 1284 assert(!(flags & BDRV_REQ_SERIALISING)); 1285 1286 if (!(flags & BDRV_REQ_NO_SERIALISING)) { 1287 wait_serialising_requests(req); 1288 } 1289 1290 if (flags & BDRV_REQ_COPY_ON_READ) { 1291 int64_t pnum; 1292 1293 ret = bdrv_is_allocated(bs, offset, bytes, &pnum); 1294 if (ret < 0) { 1295 goto out; 1296 } 1297 1298 if (!ret || pnum != bytes) { 1299 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov); 1300 goto out; 1301 } 1302 } 1303 1304 /* Forward the request to the BlockDriver, possibly fragmenting it */ 1305 total_bytes = bdrv_getlength(bs); 1306 if (total_bytes < 0) { 1307 ret = total_bytes; 1308 goto out; 1309 } 1310 1311 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); 1312 if (bytes <= max_bytes && bytes <= max_transfer) { 1313 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0); 1314 goto out; 1315 } 1316 1317 while (bytes_remaining) { 1318 int num; 1319 1320 if (max_bytes) { 1321 QEMUIOVector local_qiov; 1322 1323 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer)); 1324 assert(num); 1325 qemu_iovec_init(&local_qiov, qiov->niov); 1326 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num); 1327 1328 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, 1329 num, &local_qiov, 0); 1330 max_bytes -= num; 1331 qemu_iovec_destroy(&local_qiov); 1332 } else { 1333 num = bytes_remaining; 1334 ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0, 1335 bytes_remaining); 1336 } 1337 if (ret < 0) { 1338 goto out; 1339 } 1340 bytes_remaining -= num; 1341 } 1342 1343 out: 1344 return ret < 0 ? 
ret : 0; 1345 } 1346 1347 /* 1348 * Handle a read request in coroutine context 1349 */ 1350 int coroutine_fn bdrv_co_preadv(BdrvChild *child, 1351 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 1352 BdrvRequestFlags flags) 1353 { 1354 BlockDriverState *bs = child->bs; 1355 BlockDriver *drv = bs->drv; 1356 BdrvTrackedRequest req; 1357 1358 uint64_t align = bs->bl.request_alignment; 1359 uint8_t *head_buf = NULL; 1360 uint8_t *tail_buf = NULL; 1361 QEMUIOVector local_qiov; 1362 bool use_local_qiov = false; 1363 int ret; 1364 1365 trace_bdrv_co_preadv(child->bs, offset, bytes, flags); 1366 1367 if (!drv) { 1368 return -ENOMEDIUM; 1369 } 1370 1371 ret = bdrv_check_byte_request(bs, offset, bytes); 1372 if (ret < 0) { 1373 return ret; 1374 } 1375 1376 bdrv_inc_in_flight(bs); 1377 1378 /* Don't do copy-on-read if we read data before write operation */ 1379 if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) { 1380 flags |= BDRV_REQ_COPY_ON_READ; 1381 } 1382 1383 /* Align read if necessary by padding qiov */ 1384 if (offset & (align - 1)) { 1385 head_buf = qemu_blockalign(bs, align); 1386 qemu_iovec_init(&local_qiov, qiov->niov + 2); 1387 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 1388 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 1389 use_local_qiov = true; 1390 1391 bytes += offset & (align - 1); 1392 offset = offset & ~(align - 1); 1393 } 1394 1395 if ((offset + bytes) & (align - 1)) { 1396 if (!use_local_qiov) { 1397 qemu_iovec_init(&local_qiov, qiov->niov + 1); 1398 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 1399 use_local_qiov = true; 1400 } 1401 tail_buf = qemu_blockalign(bs, align); 1402 qemu_iovec_add(&local_qiov, tail_buf, 1403 align - ((offset + bytes) & (align - 1))); 1404 1405 bytes = ROUND_UP(bytes, align); 1406 } 1407 1408 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); 1409 ret = bdrv_aligned_preadv(child, &req, offset, bytes, align, 1410 use_local_qiov ? &local_qiov : qiov, 1411 flags); 1412 tracked_request_end(&req); 1413 bdrv_dec_in_flight(bs); 1414 1415 if (use_local_qiov) { 1416 qemu_iovec_destroy(&local_qiov); 1417 qemu_vfree(head_buf); 1418 qemu_vfree(tail_buf); 1419 } 1420 1421 return ret; 1422 } 1423 1424 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, 1425 int64_t offset, int bytes, BdrvRequestFlags flags) 1426 { 1427 BlockDriver *drv = bs->drv; 1428 QEMUIOVector qiov; 1429 void *buf = NULL; 1430 int ret = 0; 1431 bool need_flush = false; 1432 int head = 0; 1433 int tail = 0; 1434 1435 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); 1436 int alignment = MAX(bs->bl.pwrite_zeroes_alignment, 1437 bs->bl.request_alignment); 1438 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); 1439 1440 if (!drv) { 1441 return -ENOMEDIUM; 1442 } 1443 1444 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { 1445 return -ENOTSUP; 1446 } 1447 1448 assert(alignment % bs->bl.request_alignment == 0); 1449 head = offset % alignment; 1450 tail = (offset + bytes) % alignment; 1451 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); 1452 assert(max_write_zeroes >= bs->bl.request_alignment); 1453 1454 while (bytes > 0 && !ret) { 1455 int num = bytes; 1456 1457 /* Align request. Block drivers can expect the "bulk" of the request 1458 * to be aligned, and that unaligned requests do not cross cluster 1459 * boundaries. 1460 */ 1461 if (head) { 1462 /* Make a small request up to the first aligned sector. 
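 * ('head' is the offset's misalignment within 'alignment', as computed
 * above, so 'alignment - head' bytes reach the next aligned boundary.)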
For convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret < 0 && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}

static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;
    bool waited;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);

    if (bs->read_only) {
        return -EPERM;
    }

    /* BDRV_REQ_NO_SERIALISING is only for read operations */
    assert(!(flags & BDRV_REQ_NO_SERIALISING));
    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));

    if (flags & BDRV_REQ_SERIALISING) {
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    waited = wait_serialising_requests(req);

    assert(!waited || !req->serialising ||
           is_request_serialising_and_aligned(req));
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
                                                req);
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
1578 default: 1579 abort(); 1580 } 1581 } 1582 1583 static inline void coroutine_fn 1584 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes, 1585 BdrvTrackedRequest *req, int ret) 1586 { 1587 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); 1588 BlockDriverState *bs = child->bs; 1589 1590 atomic_inc(&bs->write_gen); 1591 1592 /* 1593 * Discard cannot extend the image, but in error handling cases, such as 1594 * when reverting a qcow2 cluster allocation, the discarded range can pass 1595 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD 1596 * here. Instead, just skip it, since semantically a discard request 1597 * beyond EOF cannot expand the image anyway. 1598 */ 1599 if (ret == 0 && 1600 (req->type == BDRV_TRACKED_TRUNCATE || 1601 end_sector > bs->total_sectors) && 1602 req->type != BDRV_TRACKED_DISCARD) { 1603 bs->total_sectors = end_sector; 1604 bdrv_parent_cb_resize(bs); 1605 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); 1606 } 1607 if (req->bytes) { 1608 switch (req->type) { 1609 case BDRV_TRACKED_WRITE: 1610 stat64_max(&bs->wr_highest_offset, offset + bytes); 1611 /* fall through, to set dirty bits */ 1612 case BDRV_TRACKED_DISCARD: 1613 bdrv_set_dirty(bs, offset, bytes); 1614 break; 1615 default: 1616 break; 1617 } 1618 } 1619 } 1620 1621 /* 1622 * Forwards an already correctly aligned write request to the BlockDriver, 1623 * after possibly fragmenting it. 1624 */ 1625 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, 1626 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 1627 int64_t align, QEMUIOVector *qiov, int flags) 1628 { 1629 BlockDriverState *bs = child->bs; 1630 BlockDriver *drv = bs->drv; 1631 int ret; 1632 1633 uint64_t bytes_remaining = bytes; 1634 int max_transfer; 1635 1636 if (!drv) { 1637 return -ENOMEDIUM; 1638 } 1639 1640 if (bdrv_has_readonly_bitmaps(bs)) { 1641 return -EPERM; 1642 } 1643 1644 assert(is_power_of_2(align)); 1645 assert((offset & (align - 1)) == 0); 1646 assert((bytes & (align - 1)) == 0); 1647 assert(!qiov || bytes == qiov->size); 1648 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), 1649 align); 1650 1651 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); 1652 1653 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 1654 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && 1655 qemu_iovec_is_zero(qiov)) { 1656 flags |= BDRV_REQ_ZERO_WRITE; 1657 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 1658 flags |= BDRV_REQ_MAY_UNMAP; 1659 } 1660 } 1661 1662 if (ret < 0) { 1663 /* Do nothing, write notifier decided to fail this request */ 1664 } else if (flags & BDRV_REQ_ZERO_WRITE) { 1665 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); 1666 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); 1667 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { 1668 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov); 1669 } else if (bytes <= max_transfer) { 1670 bdrv_debug_event(bs, BLKDBG_PWRITEV); 1671 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags); 1672 } else { 1673 bdrv_debug_event(bs, BLKDBG_PWRITEV); 1674 while (bytes_remaining) { 1675 int num = MIN(bytes_remaining, max_transfer); 1676 QEMUIOVector local_qiov; 1677 int local_flags = flags; 1678 1679 assert(num); 1680 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && 1681 !(bs->supported_write_flags & BDRV_REQ_FUA)) { 1682 /* If FUA is going to be emulated by flush, we only 1683 * need to flush 
on the last iteration */ 1684 local_flags &= ~BDRV_REQ_FUA; 1685 } 1686 qemu_iovec_init(&local_qiov, qiov->niov); 1687 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num); 1688 1689 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, 1690 num, &local_qiov, local_flags); 1691 qemu_iovec_destroy(&local_qiov); 1692 if (ret < 0) { 1693 break; 1694 } 1695 bytes_remaining -= num; 1696 } 1697 } 1698 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); 1699 1700 if (ret >= 0) { 1701 ret = 0; 1702 } 1703 bdrv_co_write_req_finish(child, offset, bytes, req, ret); 1704 1705 return ret; 1706 } 1707 1708 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, 1709 int64_t offset, 1710 unsigned int bytes, 1711 BdrvRequestFlags flags, 1712 BdrvTrackedRequest *req) 1713 { 1714 BlockDriverState *bs = child->bs; 1715 uint8_t *buf = NULL; 1716 QEMUIOVector local_qiov; 1717 uint64_t align = bs->bl.request_alignment; 1718 unsigned int head_padding_bytes, tail_padding_bytes; 1719 int ret = 0; 1720 1721 head_padding_bytes = offset & (align - 1); 1722 tail_padding_bytes = (align - (offset + bytes)) & (align - 1); 1723 1724 1725 assert(flags & BDRV_REQ_ZERO_WRITE); 1726 if (head_padding_bytes || tail_padding_bytes) { 1727 buf = qemu_blockalign(bs, align); 1728 qemu_iovec_init_buf(&local_qiov, buf, align); 1729 } 1730 if (head_padding_bytes) { 1731 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); 1732 1733 /* RMW the unaligned part before head. */ 1734 mark_request_serialising(req, align); 1735 wait_serialising_requests(req); 1736 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 1737 ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align, 1738 align, &local_qiov, 0); 1739 if (ret < 0) { 1740 goto fail; 1741 } 1742 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1743 1744 memset(buf + head_padding_bytes, 0, zero_bytes); 1745 ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align, 1746 align, &local_qiov, 1747 flags & ~BDRV_REQ_ZERO_WRITE); 1748 if (ret < 0) { 1749 goto fail; 1750 } 1751 offset += zero_bytes; 1752 bytes -= zero_bytes; 1753 } 1754 1755 assert(!bytes || (offset & (align - 1)) == 0); 1756 if (bytes >= align) { 1757 /* Write the aligned part in the middle. */ 1758 uint64_t aligned_bytes = bytes & ~(align - 1); 1759 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, 1760 NULL, flags); 1761 if (ret < 0) { 1762 goto fail; 1763 } 1764 bytes -= aligned_bytes; 1765 offset += aligned_bytes; 1766 } 1767 1768 assert(!bytes || (offset & (align - 1)) == 0); 1769 if (bytes) { 1770 assert(align == tail_padding_bytes + bytes); 1771 /* RMW the unaligned part after tail. 
*/ 1772 mark_request_serialising(req, align); 1773 wait_serialising_requests(req); 1774 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); 1775 ret = bdrv_aligned_preadv(child, req, offset, align, 1776 align, &local_qiov, 0); 1777 if (ret < 0) { 1778 goto fail; 1779 } 1780 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 1781 1782 memset(buf, 0, bytes); 1783 ret = bdrv_aligned_pwritev(child, req, offset, align, align, 1784 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); 1785 } 1786 fail: 1787 qemu_vfree(buf); 1788 return ret; 1789 1790 } 1791 1792 /* 1793 * Handle a write request in coroutine context 1794 */ 1795 int coroutine_fn bdrv_co_pwritev(BdrvChild *child, 1796 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 1797 BdrvRequestFlags flags) 1798 { 1799 BlockDriverState *bs = child->bs; 1800 BdrvTrackedRequest req; 1801 uint64_t align = bs->bl.request_alignment; 1802 uint8_t *head_buf = NULL; 1803 uint8_t *tail_buf = NULL; 1804 QEMUIOVector local_qiov; 1805 bool use_local_qiov = false; 1806 int ret; 1807 1808 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags); 1809 1810 if (!bs->drv) { 1811 return -ENOMEDIUM; 1812 } 1813 1814 ret = bdrv_check_byte_request(bs, offset, bytes); 1815 if (ret < 0) { 1816 return ret; 1817 } 1818 1819 bdrv_inc_in_flight(bs); 1820 /* 1821 * Align write if necessary by performing a read-modify-write cycle. 1822 * Pad qiov with the read parts and be sure to have a tracked request not 1823 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 1824 */ 1825 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); 1826 1827 if (flags & BDRV_REQ_ZERO_WRITE) { 1828 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); 1829 goto out; 1830 } 1831 1832 if (offset & (align - 1)) { 1833 QEMUIOVector head_qiov; 1834 1835 mark_request_serialising(&req, align); 1836 wait_serialising_requests(&req); 1837 1838 head_buf = qemu_blockalign(bs, align); 1839 qemu_iovec_init_buf(&head_qiov, head_buf, align); 1840 1841 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); 1842 ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align, 1843 align, &head_qiov, 0); 1844 if (ret < 0) { 1845 goto fail; 1846 } 1847 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 1848 1849 qemu_iovec_init(&local_qiov, qiov->niov + 2); 1850 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 1851 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 1852 use_local_qiov = true; 1853 1854 bytes += offset & (align - 1); 1855 offset = offset & ~(align - 1); 1856 1857 /* We have read the tail already if the request is smaller 1858 * than one aligned block. 
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_init_buf(&tail_qiov, tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of whether they are reachable via a
 * BlockBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}


typedef struct BdrvCoBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    bool want_zero;
    int64_t offset;
    int64_t bytes;
    int64_t *pnum;
    int64_t *map;
    BlockDriverState **file;
    int ret;
    bool done;
} BdrvCoBlockStatusData;

int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
                                                bool want_zero,
                                                int64_t offset,
                                                int64_t bytes,
                                                int64_t *pnum,
                                                int64_t *map,
                                                BlockDriverState **file)
{
    assert(bs->file && bs->file->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->file->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}

int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    assert(bs->backing && bs->backing->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state. Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure. Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
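 *
 * Illustrative call from coroutine context (values are an example only,
 * not a guarantee):
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     ret = bdrv_co_block_status(bs, true, 0, 65536, &pnum, &map, &file);
 *
 * A fully allocated area might return BDRV_BLOCK_DATA |
 * BDRV_BLOCK_OFFSET_VALID with *pnum <= 65536 and 'map'/'file' filled in,
 * while an unallocated hole typically reports BDRV_BLOCK_ZERO without
 * BDRV_BLOCK_DATA.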
2022 */ 2023 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, 2024 bool want_zero, 2025 int64_t offset, int64_t bytes, 2026 int64_t *pnum, int64_t *map, 2027 BlockDriverState **file) 2028 { 2029 int64_t total_size; 2030 int64_t n; /* bytes */ 2031 int ret; 2032 int64_t local_map = 0; 2033 BlockDriverState *local_file = NULL; 2034 int64_t aligned_offset, aligned_bytes; 2035 uint32_t align; 2036 2037 assert(pnum); 2038 *pnum = 0; 2039 total_size = bdrv_getlength(bs); 2040 if (total_size < 0) { 2041 ret = total_size; 2042 goto early_out; 2043 } 2044 2045 if (offset >= total_size) { 2046 ret = BDRV_BLOCK_EOF; 2047 goto early_out; 2048 } 2049 if (!bytes) { 2050 ret = 0; 2051 goto early_out; 2052 } 2053 2054 n = total_size - offset; 2055 if (n < bytes) { 2056 bytes = n; 2057 } 2058 2059 /* Must be non-NULL or bdrv_getlength() would have failed */ 2060 assert(bs->drv); 2061 if (!bs->drv->bdrv_co_block_status) { 2062 *pnum = bytes; 2063 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 2064 if (offset + bytes == total_size) { 2065 ret |= BDRV_BLOCK_EOF; 2066 } 2067 if (bs->drv->protocol_name) { 2068 ret |= BDRV_BLOCK_OFFSET_VALID; 2069 local_map = offset; 2070 local_file = bs; 2071 } 2072 goto early_out; 2073 } 2074 2075 bdrv_inc_in_flight(bs); 2076 2077 /* Round out to request_alignment boundaries */ 2078 align = bs->bl.request_alignment; 2079 aligned_offset = QEMU_ALIGN_DOWN(offset, align); 2080 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; 2081 2082 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, 2083 aligned_bytes, pnum, &local_map, 2084 &local_file); 2085 if (ret < 0) { 2086 *pnum = 0; 2087 goto out; 2088 } 2089 2090 /* 2091 * The driver's result must be a non-zero multiple of request_alignment. 2092 * Clamp pnum and adjust map to original request. 2093 */ 2094 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && 2095 align > offset - aligned_offset); 2096 if (ret & BDRV_BLOCK_RECURSE) { 2097 assert(ret & BDRV_BLOCK_DATA); 2098 assert(ret & BDRV_BLOCK_OFFSET_VALID); 2099 assert(!(ret & BDRV_BLOCK_ZERO)); 2100 } 2101 2102 *pnum -= offset - aligned_offset; 2103 if (*pnum > bytes) { 2104 *pnum = bytes; 2105 } 2106 if (ret & BDRV_BLOCK_OFFSET_VALID) { 2107 local_map += offset - aligned_offset; 2108 } 2109 2110 if (ret & BDRV_BLOCK_RAW) { 2111 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); 2112 ret = bdrv_co_block_status(local_file, want_zero, local_map, 2113 *pnum, pnum, &local_map, &local_file); 2114 goto out; 2115 } 2116 2117 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 2118 ret |= BDRV_BLOCK_ALLOCATED; 2119 } else if (want_zero) { 2120 if (bdrv_unallocated_blocks_are_zero(bs)) { 2121 ret |= BDRV_BLOCK_ZERO; 2122 } else if (bs->backing) { 2123 BlockDriverState *bs2 = bs->backing->bs; 2124 int64_t size2 = bdrv_getlength(bs2); 2125 2126 if (size2 >= 0 && offset >= size2) { 2127 ret |= BDRV_BLOCK_ZERO; 2128 } 2129 } 2130 } 2131 2132 if (want_zero && ret & BDRV_BLOCK_RECURSE && 2133 local_file && local_file != bs && 2134 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 2135 (ret & BDRV_BLOCK_OFFSET_VALID)) { 2136 int64_t file_pnum; 2137 int ret2; 2138 2139 ret2 = bdrv_co_block_status(local_file, want_zero, local_map, 2140 *pnum, &file_pnum, NULL, NULL); 2141 if (ret2 >= 0) { 2142 /* Ignore errors. This is just providing extra information, it 2143 * is useful but not necessary. 
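 *
 * What the extra query can add is zero information from the protocol layer:
 * a range that the format layer reports as allocated data may still read as
 * zeroes because the underlying file is sparse or short there.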
2144 */ 2145 if (ret2 & BDRV_BLOCK_EOF && 2146 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { 2147 /* 2148 * It is valid for the format block driver to read 2149 * beyond the end of the underlying file's current 2150 * size; such areas read as zero. 2151 */ 2152 ret |= BDRV_BLOCK_ZERO; 2153 } else { 2154 /* Limit request to the range reported by the protocol driver */ 2155 *pnum = file_pnum; 2156 ret |= (ret2 & BDRV_BLOCK_ZERO); 2157 } 2158 } 2159 } 2160 2161 out: 2162 bdrv_dec_in_flight(bs); 2163 if (ret >= 0 && offset + *pnum == total_size) { 2164 ret |= BDRV_BLOCK_EOF; 2165 } 2166 early_out: 2167 if (file) { 2168 *file = local_file; 2169 } 2170 if (map) { 2171 *map = local_map; 2172 } 2173 return ret; 2174 } 2175 2176 static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, 2177 BlockDriverState *base, 2178 bool want_zero, 2179 int64_t offset, 2180 int64_t bytes, 2181 int64_t *pnum, 2182 int64_t *map, 2183 BlockDriverState **file) 2184 { 2185 BlockDriverState *p; 2186 int ret = 0; 2187 bool first = true; 2188 2189 assert(bs != base); 2190 for (p = bs; p != base; p = backing_bs(p)) { 2191 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, 2192 file); 2193 if (ret < 0) { 2194 break; 2195 } 2196 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) { 2197 /* 2198 * Reading beyond the end of the file continues to read 2199 * zeroes, but we can only widen the result to the 2200 * unallocated length we learned from an earlier 2201 * iteration. 2202 */ 2203 *pnum = bytes; 2204 } 2205 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) { 2206 break; 2207 } 2208 /* [offset, pnum] unallocated on this layer, which could be only 2209 * the first part of [offset, bytes]. */ 2210 bytes = MIN(bytes, *pnum); 2211 first = false; 2212 } 2213 return ret; 2214 } 2215 2216 /* Coroutine wrapper for bdrv_block_status_above() */ 2217 static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque) 2218 { 2219 BdrvCoBlockStatusData *data = opaque; 2220 2221 data->ret = bdrv_co_block_status_above(data->bs, data->base, 2222 data->want_zero, 2223 data->offset, data->bytes, 2224 data->pnum, data->map, data->file); 2225 data->done = true; 2226 aio_wait_kick(); 2227 } 2228 2229 /* 2230 * Synchronous wrapper around bdrv_co_block_status_above(). 2231 * 2232 * See bdrv_co_block_status_above() for details. 
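 *
 * Like the other synchronous wrappers in this file, it packs the arguments
 * into a BdrvCoBlockStatusData, then either calls the coroutine entry point
 * directly (if already running in coroutine context) or spawns a coroutine
 * and polls with BDRV_POLL_WHILE() until the entry point sets data.done.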
2233 */ 2234 static int bdrv_common_block_status_above(BlockDriverState *bs, 2235 BlockDriverState *base, 2236 bool want_zero, int64_t offset, 2237 int64_t bytes, int64_t *pnum, 2238 int64_t *map, 2239 BlockDriverState **file) 2240 { 2241 Coroutine *co; 2242 BdrvCoBlockStatusData data = { 2243 .bs = bs, 2244 .base = base, 2245 .want_zero = want_zero, 2246 .offset = offset, 2247 .bytes = bytes, 2248 .pnum = pnum, 2249 .map = map, 2250 .file = file, 2251 .done = false, 2252 }; 2253 2254 if (qemu_in_coroutine()) { 2255 /* Fast-path if already in coroutine context */ 2256 bdrv_block_status_above_co_entry(&data); 2257 } else { 2258 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data); 2259 bdrv_coroutine_enter(bs, co); 2260 BDRV_POLL_WHILE(bs, !data.done); 2261 } 2262 return data.ret; 2263 } 2264 2265 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, 2266 int64_t offset, int64_t bytes, int64_t *pnum, 2267 int64_t *map, BlockDriverState **file) 2268 { 2269 return bdrv_common_block_status_above(bs, base, true, offset, bytes, 2270 pnum, map, file); 2271 } 2272 2273 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, 2274 int64_t *pnum, int64_t *map, BlockDriverState **file) 2275 { 2276 return bdrv_block_status_above(bs, backing_bs(bs), 2277 offset, bytes, pnum, map, file); 2278 } 2279 2280 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, 2281 int64_t bytes, int64_t *pnum) 2282 { 2283 int ret; 2284 int64_t dummy; 2285 2286 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset, 2287 bytes, pnum ? pnum : &dummy, NULL, 2288 NULL); 2289 if (ret < 0) { 2290 return ret; 2291 } 2292 return !!(ret & BDRV_BLOCK_ALLOCATED); 2293 } 2294 2295 /* 2296 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 2297 * 2298 * Return true if (a prefix of) the given range is allocated in any image 2299 * between BASE and TOP (inclusive). BASE can be NULL to check if the given 2300 * offset is allocated in any image of the chain. Return false otherwise, 2301 * or negative errno on failure. 2302 * 2303 * 'pnum' is set to the number of bytes (including and immediately 2304 * following the specified offset) that are known to be in the same 2305 * allocated/unallocated state. Note that a subsequent call starting 2306 * at 'offset + *pnum' may return the same allocation status (in other 2307 * words, the result is not necessarily the maximum possible range); 2308 * but 'pnum' will only be 0 when end of file is reached. 
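 *
 * An illustrative use (a sketch with assumed variables, not code from this
 * file), copying only the parts of the chain above 'base' that are actually
 * allocated:
 *
 *     for (offset = 0; offset < end; offset += pnum) {
 *         ret = bdrv_is_allocated_above(top, base, offset, end - offset,
 *                                       &pnum);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         if (ret) {
 *             ... copy [offset, offset + pnum) from the chain ...
 *         }
 *     }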
2309 * 2310 */ 2311 int bdrv_is_allocated_above(BlockDriverState *top, 2312 BlockDriverState *base, 2313 int64_t offset, int64_t bytes, int64_t *pnum) 2314 { 2315 BlockDriverState *intermediate; 2316 int ret; 2317 int64_t n = bytes; 2318 2319 intermediate = top; 2320 while (intermediate && intermediate != base) { 2321 int64_t pnum_inter; 2322 int64_t size_inter; 2323 2324 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter); 2325 if (ret < 0) { 2326 return ret; 2327 } 2328 if (ret) { 2329 *pnum = pnum_inter; 2330 return 1; 2331 } 2332 2333 size_inter = bdrv_getlength(intermediate); 2334 if (size_inter < 0) { 2335 return size_inter; 2336 } 2337 if (n > pnum_inter && 2338 (intermediate == top || offset + pnum_inter < size_inter)) { 2339 n = pnum_inter; 2340 } 2341 2342 intermediate = backing_bs(intermediate); 2343 } 2344 2345 *pnum = n; 2346 return 0; 2347 } 2348 2349 typedef struct BdrvVmstateCo { 2350 BlockDriverState *bs; 2351 QEMUIOVector *qiov; 2352 int64_t pos; 2353 bool is_read; 2354 int ret; 2355 } BdrvVmstateCo; 2356 2357 static int coroutine_fn 2358 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 2359 bool is_read) 2360 { 2361 BlockDriver *drv = bs->drv; 2362 int ret = -ENOTSUP; 2363 2364 bdrv_inc_in_flight(bs); 2365 2366 if (!drv) { 2367 ret = -ENOMEDIUM; 2368 } else if (drv->bdrv_load_vmstate) { 2369 if (is_read) { 2370 ret = drv->bdrv_load_vmstate(bs, qiov, pos); 2371 } else { 2372 ret = drv->bdrv_save_vmstate(bs, qiov, pos); 2373 } 2374 } else if (bs->file) { 2375 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read); 2376 } 2377 2378 bdrv_dec_in_flight(bs); 2379 return ret; 2380 } 2381 2382 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque) 2383 { 2384 BdrvVmstateCo *co = opaque; 2385 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read); 2386 aio_wait_kick(); 2387 } 2388 2389 static inline int 2390 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, 2391 bool is_read) 2392 { 2393 if (qemu_in_coroutine()) { 2394 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read); 2395 } else { 2396 BdrvVmstateCo data = { 2397 .bs = bs, 2398 .qiov = qiov, 2399 .pos = pos, 2400 .is_read = is_read, 2401 .ret = -EINPROGRESS, 2402 }; 2403 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data); 2404 2405 bdrv_coroutine_enter(bs, co); 2406 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS); 2407 return data.ret; 2408 } 2409 } 2410 2411 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2412 int64_t pos, int size) 2413 { 2414 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2415 int ret; 2416 2417 ret = bdrv_writev_vmstate(bs, &qiov, pos); 2418 if (ret < 0) { 2419 return ret; 2420 } 2421 2422 return size; 2423 } 2424 2425 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2426 { 2427 return bdrv_rw_vmstate(bs, qiov, pos, false); 2428 } 2429 2430 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2431 int64_t pos, int size) 2432 { 2433 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); 2434 int ret; 2435 2436 ret = bdrv_readv_vmstate(bs, &qiov, pos); 2437 if (ret < 0) { 2438 return ret; 2439 } 2440 2441 return size; 2442 } 2443 2444 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 2445 { 2446 return bdrv_rw_vmstate(bs, qiov, pos, true); 2447 } 2448 2449 /**************************************************************/ 2450 /* async I/Os */ 2451 2452 void bdrv_aio_cancel(BlockAIOCB *acb) 2453 { 2454 
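    /*
     * Synchronously cancel the request: take our own reference so the ACB
     * stays valid, ask for an asynchronous cancel, then poll the request's
     * AioContext until all other references are gone, i.e. until the
     * completion callback has run.
     */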
qemu_aio_ref(acb); 2455 bdrv_aio_cancel_async(acb); 2456 while (acb->refcnt > 1) { 2457 if (acb->aiocb_info->get_aio_context) { 2458 aio_poll(acb->aiocb_info->get_aio_context(acb), true); 2459 } else if (acb->bs) { 2460 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so 2461 * assert that we're not using an I/O thread. Thread-safe 2462 * code should use bdrv_aio_cancel_async exclusively. 2463 */ 2464 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); 2465 aio_poll(bdrv_get_aio_context(acb->bs), true); 2466 } else { 2467 abort(); 2468 } 2469 } 2470 qemu_aio_unref(acb); 2471 } 2472 2473 /* Async version of aio cancel. The caller is not blocked if the acb implements 2474 * cancel_async, otherwise we do nothing and let the request normally complete. 2475 * In either case the completion callback must be called. */ 2476 void bdrv_aio_cancel_async(BlockAIOCB *acb) 2477 { 2478 if (acb->aiocb_info->cancel_async) { 2479 acb->aiocb_info->cancel_async(acb); 2480 } 2481 } 2482 2483 /**************************************************************/ 2484 /* Coroutine block device emulation */ 2485 2486 typedef struct FlushCo { 2487 BlockDriverState *bs; 2488 int ret; 2489 } FlushCo; 2490 2491 2492 static void coroutine_fn bdrv_flush_co_entry(void *opaque) 2493 { 2494 FlushCo *rwco = opaque; 2495 2496 rwco->ret = bdrv_co_flush(rwco->bs); 2497 aio_wait_kick(); 2498 } 2499 2500 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 2501 { 2502 int current_gen; 2503 int ret = 0; 2504 2505 bdrv_inc_in_flight(bs); 2506 2507 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || 2508 bdrv_is_sg(bs)) { 2509 goto early_exit; 2510 } 2511 2512 qemu_co_mutex_lock(&bs->reqs_lock); 2513 current_gen = atomic_read(&bs->write_gen); 2514 2515 /* Wait until any previous flushes are completed */ 2516 while (bs->active_flush_req) { 2517 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); 2518 } 2519 2520 /* Flushes reach this point in nondecreasing current_gen order. 
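 *
 * current_gen was sampled under reqs_lock above; once the flush below
 * succeeds, flushed_gen is set to it, so a later bdrv_co_flush() that finds
 * flushed_gen == current_gen (i.e. no writes completed in between) can skip
 * the flush-to-disk step.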
*/ 2521 bs->active_flush_req = true; 2522 qemu_co_mutex_unlock(&bs->reqs_lock); 2523 2524 /* Write back all layers by calling one driver function */ 2525 if (bs->drv->bdrv_co_flush) { 2526 ret = bs->drv->bdrv_co_flush(bs); 2527 goto out; 2528 } 2529 2530 /* Write back cached data to the OS even with cache=unsafe */ 2531 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 2532 if (bs->drv->bdrv_co_flush_to_os) { 2533 ret = bs->drv->bdrv_co_flush_to_os(bs); 2534 if (ret < 0) { 2535 goto out; 2536 } 2537 } 2538 2539 /* But don't actually force it to the disk with cache=unsafe */ 2540 if (bs->open_flags & BDRV_O_NO_FLUSH) { 2541 goto flush_parent; 2542 } 2543 2544 /* Check if we really need to flush anything */ 2545 if (bs->flushed_gen == current_gen) { 2546 goto flush_parent; 2547 } 2548 2549 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 2550 if (!bs->drv) { 2551 /* bs->drv->bdrv_co_flush() might have ejected the BDS 2552 * (even in case of apparent success) */ 2553 ret = -ENOMEDIUM; 2554 goto out; 2555 } 2556 if (bs->drv->bdrv_co_flush_to_disk) { 2557 ret = bs->drv->bdrv_co_flush_to_disk(bs); 2558 } else if (bs->drv->bdrv_aio_flush) { 2559 BlockAIOCB *acb; 2560 CoroutineIOCompletion co = { 2561 .coroutine = qemu_coroutine_self(), 2562 }; 2563 2564 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 2565 if (acb == NULL) { 2566 ret = -EIO; 2567 } else { 2568 qemu_coroutine_yield(); 2569 ret = co.ret; 2570 } 2571 } else { 2572 /* 2573 * Some block drivers always operate in either writethrough or unsafe 2574 * mode and therefore don't support bdrv_flush. Usually qemu doesn't 2575 * know how the server works (because the behaviour is hardcoded or 2576 * depends on server-side configuration), so we can't ensure that 2577 * everything is safe on disk. Returning an error doesn't work because 2578 * that would break guests even if the server operates in writethrough 2579 * mode. 2580 * 2581 * Let's hope the user knows what they're doing. 2582 */ 2583 ret = 0; 2584 } 2585 2586 if (ret < 0) { 2587 goto out; 2588 } 2589 2590 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 2591 * in the case of cache=unsafe, so there are no useless flushes. 2592 */ 2593 flush_parent: 2594 ret = bs->file ?
bdrv_co_flush(bs->file->bs) : 0; 2595 out: 2596 /* Notify any pending flushes that we have completed */ 2597 if (ret == 0) { 2598 bs->flushed_gen = current_gen; 2599 } 2600 2601 qemu_co_mutex_lock(&bs->reqs_lock); 2602 bs->active_flush_req = false; 2603 /* Return value is ignored - it's ok if wait queue is empty */ 2604 qemu_co_queue_next(&bs->flush_queue); 2605 qemu_co_mutex_unlock(&bs->reqs_lock); 2606 2607 early_exit: 2608 bdrv_dec_in_flight(bs); 2609 return ret; 2610 } 2611 2612 int bdrv_flush(BlockDriverState *bs) 2613 { 2614 Coroutine *co; 2615 FlushCo flush_co = { 2616 .bs = bs, 2617 .ret = NOT_DONE, 2618 }; 2619 2620 if (qemu_in_coroutine()) { 2621 /* Fast-path if already in coroutine context */ 2622 bdrv_flush_co_entry(&flush_co); 2623 } else { 2624 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co); 2625 bdrv_coroutine_enter(bs, co); 2626 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE); 2627 } 2628 2629 return flush_co.ret; 2630 } 2631 2632 typedef struct DiscardCo { 2633 BdrvChild *child; 2634 int64_t offset; 2635 int64_t bytes; 2636 int ret; 2637 } DiscardCo; 2638 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque) 2639 { 2640 DiscardCo *rwco = opaque; 2641 2642 rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes); 2643 aio_wait_kick(); 2644 } 2645 2646 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, 2647 int64_t bytes) 2648 { 2649 BdrvTrackedRequest req; 2650 int max_pdiscard, ret; 2651 int head, tail, align; 2652 BlockDriverState *bs = child->bs; 2653 2654 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) { 2655 return -ENOMEDIUM; 2656 } 2657 2658 if (bdrv_has_readonly_bitmaps(bs)) { 2659 return -EPERM; 2660 } 2661 2662 if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) { 2663 return -EIO; 2664 } 2665 2666 /* Do nothing if disabled. */ 2667 if (!(bs->open_flags & BDRV_O_UNMAP)) { 2668 return 0; 2669 } 2670 2671 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { 2672 return 0; 2673 } 2674 2675 /* Discard is advisory, but some devices track and coalesce 2676 * unaligned requests, so we must pass everything down rather than 2677 * round here. Still, most devices will just silently ignore 2678 * unaligned requests (by returning -ENOTSUP), so we must fragment 2679 * the request accordingly. */ 2680 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); 2681 assert(align % bs->bl.request_alignment == 0); 2682 head = offset % align; 2683 tail = (offset + bytes) % align; 2684 2685 bdrv_inc_in_flight(bs); 2686 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); 2687 2688 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); 2689 if (ret < 0) { 2690 goto out; 2691 } 2692 2693 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), 2694 align); 2695 assert(max_pdiscard >= bs->bl.request_alignment); 2696 2697 while (bytes > 0) { 2698 int64_t num = bytes; 2699 2700 if (head) { 2701 /* Make small requests to get to alignment boundaries. */ 2702 num = MIN(bytes, align - head); 2703 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { 2704 num %= bs->bl.request_alignment; 2705 } 2706 head = (head + num) % align; 2707 assert(num < max_pdiscard); 2708 } else if (tail) { 2709 if (num > align) { 2710 /* Shorten the request to the last aligned cluster. 
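 * As an illustrative walk-through with assumed values (not taken from this
 * file): with align = 64k, offset = 10k and bytes = 200k, head is 10k and
 * tail is 18k, so the loop issues 54k to reach the first 64k boundary, then
 * 128k here (the remaining 146k minus the 18k tail), and finally the 18k
 * tail on its own.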
*/ 2711 num -= tail; 2712 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && 2713 tail > bs->bl.request_alignment) { 2714 tail %= bs->bl.request_alignment; 2715 num -= tail; 2716 } 2717 } 2718 /* limit request size */ 2719 if (num > max_pdiscard) { 2720 num = max_pdiscard; 2721 } 2722 2723 if (!bs->drv) { 2724 ret = -ENOMEDIUM; 2725 goto out; 2726 } 2727 if (bs->drv->bdrv_co_pdiscard) { 2728 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); 2729 } else { 2730 BlockAIOCB *acb; 2731 CoroutineIOCompletion co = { 2732 .coroutine = qemu_coroutine_self(), 2733 }; 2734 2735 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, 2736 bdrv_co_io_em_complete, &co); 2737 if (acb == NULL) { 2738 ret = -EIO; 2739 goto out; 2740 } else { 2741 qemu_coroutine_yield(); 2742 ret = co.ret; 2743 } 2744 } 2745 if (ret && ret != -ENOTSUP) { 2746 goto out; 2747 } 2748 2749 offset += num; 2750 bytes -= num; 2751 } 2752 ret = 0; 2753 out: 2754 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); 2755 tracked_request_end(&req); 2756 bdrv_dec_in_flight(bs); 2757 return ret; 2758 } 2759 2760 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes) 2761 { 2762 Coroutine *co; 2763 DiscardCo rwco = { 2764 .child = child, 2765 .offset = offset, 2766 .bytes = bytes, 2767 .ret = NOT_DONE, 2768 }; 2769 2770 if (qemu_in_coroutine()) { 2771 /* Fast-path if already in coroutine context */ 2772 bdrv_pdiscard_co_entry(&rwco); 2773 } else { 2774 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco); 2775 bdrv_coroutine_enter(child->bs, co); 2776 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE); 2777 } 2778 2779 return rwco.ret; 2780 } 2781 2782 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) 2783 { 2784 BlockDriver *drv = bs->drv; 2785 CoroutineIOCompletion co = { 2786 .coroutine = qemu_coroutine_self(), 2787 }; 2788 BlockAIOCB *acb; 2789 2790 bdrv_inc_in_flight(bs); 2791 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { 2792 co.ret = -ENOTSUP; 2793 goto out; 2794 } 2795 2796 if (drv->bdrv_co_ioctl) { 2797 co.ret = drv->bdrv_co_ioctl(bs, req, buf); 2798 } else { 2799 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); 2800 if (!acb) { 2801 co.ret = -ENOTSUP; 2802 goto out; 2803 } 2804 qemu_coroutine_yield(); 2805 } 2806 out: 2807 bdrv_dec_in_flight(bs); 2808 return co.ret; 2809 } 2810 2811 void *qemu_blockalign(BlockDriverState *bs, size_t size) 2812 { 2813 return qemu_memalign(bdrv_opt_mem_align(bs), size); 2814 } 2815 2816 void *qemu_blockalign0(BlockDriverState *bs, size_t size) 2817 { 2818 return memset(qemu_blockalign(bs, size), 0, size); 2819 } 2820 2821 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) 2822 { 2823 size_t align = bdrv_opt_mem_align(bs); 2824 2825 /* Ensure that NULL is never returned on success */ 2826 assert(align > 0); 2827 if (size == 0) { 2828 size = align; 2829 } 2830 2831 return qemu_try_memalign(align, size); 2832 } 2833 2834 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) 2835 { 2836 void *mem = qemu_try_blockalign(bs, size); 2837 2838 if (mem) { 2839 memset(mem, 0, size); 2840 } 2841 2842 return mem; 2843 } 2844 2845 /* 2846 * Check if all memory in this vector is sector aligned. 
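 *
 * "Aligned" here means aligned to bdrv_min_mem_align(); a typical caller
 * (illustrative, not code from this file) checks this before deciding
 * whether a guest-supplied vector can be handed to O_DIRECT-style I/O
 * directly or must first be bounced through qemu_blockalign()ed buffers.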
2847 */ 2848 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 2849 { 2850 int i; 2851 size_t alignment = bdrv_min_mem_align(bs); 2852 2853 for (i = 0; i < qiov->niov; i++) { 2854 if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 2855 return false; 2856 } 2857 if (qiov->iov[i].iov_len % alignment) { 2858 return false; 2859 } 2860 } 2861 2862 return true; 2863 } 2864 2865 void bdrv_add_before_write_notifier(BlockDriverState *bs, 2866 NotifierWithReturn *notifier) 2867 { 2868 notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 2869 } 2870 2871 void bdrv_io_plug(BlockDriverState *bs) 2872 { 2873 BdrvChild *child; 2874 2875 QLIST_FOREACH(child, &bs->children, next) { 2876 bdrv_io_plug(child->bs); 2877 } 2878 2879 if (atomic_fetch_inc(&bs->io_plugged) == 0) { 2880 BlockDriver *drv = bs->drv; 2881 if (drv && drv->bdrv_io_plug) { 2882 drv->bdrv_io_plug(bs); 2883 } 2884 } 2885 } 2886 2887 void bdrv_io_unplug(BlockDriverState *bs) 2888 { 2889 BdrvChild *child; 2890 2891 assert(bs->io_plugged); 2892 if (atomic_fetch_dec(&bs->io_plugged) == 1) { 2893 BlockDriver *drv = bs->drv; 2894 if (drv && drv->bdrv_io_unplug) { 2895 drv->bdrv_io_unplug(bs); 2896 } 2897 } 2898 2899 QLIST_FOREACH(child, &bs->children, next) { 2900 bdrv_io_unplug(child->bs); 2901 } 2902 } 2903 2904 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size) 2905 { 2906 BdrvChild *child; 2907 2908 if (bs->drv && bs->drv->bdrv_register_buf) { 2909 bs->drv->bdrv_register_buf(bs, host, size); 2910 } 2911 QLIST_FOREACH(child, &bs->children, next) { 2912 bdrv_register_buf(child->bs, host, size); 2913 } 2914 } 2915 2916 void bdrv_unregister_buf(BlockDriverState *bs, void *host) 2917 { 2918 BdrvChild *child; 2919 2920 if (bs->drv && bs->drv->bdrv_unregister_buf) { 2921 bs->drv->bdrv_unregister_buf(bs, host); 2922 } 2923 QLIST_FOREACH(child, &bs->children, next) { 2924 bdrv_unregister_buf(child->bs, host); 2925 } 2926 } 2927 2928 static int coroutine_fn bdrv_co_copy_range_internal( 2929 BdrvChild *src, uint64_t src_offset, BdrvChild *dst, 2930 uint64_t dst_offset, uint64_t bytes, 2931 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, 2932 bool recurse_src) 2933 { 2934 BdrvTrackedRequest req; 2935 int ret; 2936 2937 /* TODO We can support BDRV_REQ_NO_FALLBACK here */ 2938 assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); 2939 assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); 2940 2941 if (!dst || !dst->bs) { 2942 return -ENOMEDIUM; 2943 } 2944 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes); 2945 if (ret) { 2946 return ret; 2947 } 2948 if (write_flags & BDRV_REQ_ZERO_WRITE) { 2949 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); 2950 } 2951 2952 if (!src || !src->bs) { 2953 return -ENOMEDIUM; 2954 } 2955 ret = bdrv_check_byte_request(src->bs, src_offset, bytes); 2956 if (ret) { 2957 return ret; 2958 } 2959 2960 if (!src->bs->drv->bdrv_co_copy_range_from 2961 || !dst->bs->drv->bdrv_co_copy_range_to 2962 || src->bs->encrypted || dst->bs->encrypted) { 2963 return -ENOTSUP; 2964 } 2965 2966 if (recurse_src) { 2967 bdrv_inc_in_flight(src->bs); 2968 tracked_request_begin(&req, src->bs, src_offset, bytes, 2969 BDRV_TRACKED_READ); 2970 2971 /* BDRV_REQ_SERIALISING is only for write operation */ 2972 assert(!(read_flags & BDRV_REQ_SERIALISING)); 2973 if (!(read_flags & BDRV_REQ_NO_SERIALISING)) { 2974 wait_serialising_requests(&req); 2975 } 2976 2977 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, 2978 src, src_offset, 2979 dst, dst_offset, 2980 bytes, 2981 
read_flags, write_flags); 2982 2983 tracked_request_end(&req); 2984 bdrv_dec_in_flight(src->bs); 2985 } else { 2986 bdrv_inc_in_flight(dst->bs); 2987 tracked_request_begin(&req, dst->bs, dst_offset, bytes, 2988 BDRV_TRACKED_WRITE); 2989 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, 2990 write_flags); 2991 if (!ret) { 2992 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, 2993 src, src_offset, 2994 dst, dst_offset, 2995 bytes, 2996 read_flags, write_flags); 2997 } 2998 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); 2999 tracked_request_end(&req); 3000 bdrv_dec_in_flight(dst->bs); 3001 } 3002 3003 return ret; 3004 } 3005 3006 /* Copy range from @src to @dst. 3007 * 3008 * See the comment of bdrv_co_copy_range for the parameter and return value 3009 * semantics. */ 3010 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset, 3011 BdrvChild *dst, uint64_t dst_offset, 3012 uint64_t bytes, 3013 BdrvRequestFlags read_flags, 3014 BdrvRequestFlags write_flags) 3015 { 3016 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, 3017 read_flags, write_flags); 3018 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3019 bytes, read_flags, write_flags, true); 3020 } 3021 3022 /* Copy range from @src to @dst. 3023 * 3024 * See the comment of bdrv_co_copy_range for the parameter and return value 3025 * semantics. */ 3026 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset, 3027 BdrvChild *dst, uint64_t dst_offset, 3028 uint64_t bytes, 3029 BdrvRequestFlags read_flags, 3030 BdrvRequestFlags write_flags) 3031 { 3032 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3033 read_flags, write_flags); 3034 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, 3035 bytes, read_flags, write_flags, false); 3036 } 3037 3038 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset, 3039 BdrvChild *dst, uint64_t dst_offset, 3040 uint64_t bytes, BdrvRequestFlags read_flags, 3041 BdrvRequestFlags write_flags) 3042 { 3043 return bdrv_co_copy_range_from(src, src_offset, 3044 dst, dst_offset, 3045 bytes, read_flags, write_flags); 3046 } 3047 3048 static void bdrv_parent_cb_resize(BlockDriverState *bs) 3049 { 3050 BdrvChild *c; 3051 QLIST_FOREACH(c, &bs->parents, next_parent) { 3052 if (c->role->resize) { 3053 c->role->resize(c); 3054 } 3055 } 3056 } 3057 3058 /** 3059 * Truncate file to 'offset' bytes (needed only for file protocols) 3060 */ 3061 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, 3062 PreallocMode prealloc, Error **errp) 3063 { 3064 BlockDriverState *bs = child->bs; 3065 BlockDriver *drv = bs->drv; 3066 BdrvTrackedRequest req; 3067 int64_t old_size, new_bytes; 3068 int ret; 3069 3070 3071 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ 3072 if (!drv) { 3073 error_setg(errp, "No medium inserted"); 3074 return -ENOMEDIUM; 3075 } 3076 if (offset < 0) { 3077 error_setg(errp, "Image size cannot be negative"); 3078 return -EINVAL; 3079 } 3080 3081 old_size = bdrv_getlength(bs); 3082 if (old_size < 0) { 3083 error_setg_errno(errp, -old_size, "Failed to get old image size"); 3084 return old_size; 3085 } 3086 3087 if (offset > old_size) { 3088 new_bytes = offset - old_size; 3089 } else { 3090 new_bytes = 0; 3091 } 3092 3093 bdrv_inc_in_flight(bs); 3094 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, 3095 BDRV_TRACKED_TRUNCATE); 3096 3097 /* If we are growing the image and potentially using preallocation for 
the 3098 * new area, we need to make sure that no write requests are made to it 3099 * concurrently or they might be overwritten by preallocation. */ 3100 if (new_bytes) { 3101 mark_request_serialising(&req, 1); 3102 } 3103 if (bs->read_only) { 3104 error_setg(errp, "Image is read-only"); 3105 ret = -EACCES; 3106 goto out; 3107 } 3108 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 3109 0); 3110 if (ret < 0) { 3111 error_setg_errno(errp, -ret, 3112 "Failed to prepare request for truncation"); 3113 goto out; 3114 } 3115 3116 if (!drv->bdrv_co_truncate) { 3117 if (bs->file && drv->is_filter) { 3118 ret = bdrv_co_truncate(bs->file, offset, prealloc, errp); 3119 goto out; 3120 } 3121 error_setg(errp, "Image format driver does not support resize"); 3122 ret = -ENOTSUP; 3123 goto out; 3124 } 3125 3126 ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp); 3127 if (ret < 0) { 3128 goto out; 3129 } 3130 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3131 if (ret < 0) { 3132 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 3133 } else { 3134 offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3135 } 3136 /* It's possible that truncation succeeded but refresh_total_sectors 3137 * failed, but the latter doesn't affect how we should finish the request. 3138 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */ 3139 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); 3140 3141 out: 3142 tracked_request_end(&req); 3143 bdrv_dec_in_flight(bs); 3144 3145 return ret; 3146 } 3147 3148 typedef struct TruncateCo { 3149 BdrvChild *child; 3150 int64_t offset; 3151 PreallocMode prealloc; 3152 Error **errp; 3153 int ret; 3154 } TruncateCo; 3155 3156 static void coroutine_fn bdrv_truncate_co_entry(void *opaque) 3157 { 3158 TruncateCo *tco = opaque; 3159 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc, 3160 tco->errp); 3161 aio_wait_kick(); 3162 } 3163 3164 int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc, 3165 Error **errp) 3166 { 3167 Coroutine *co; 3168 TruncateCo tco = { 3169 .child = child, 3170 .offset = offset, 3171 .prealloc = prealloc, 3172 .errp = errp, 3173 .ret = NOT_DONE, 3174 }; 3175 3176 if (qemu_in_coroutine()) { 3177 /* Fast-path if already in coroutine context */ 3178 bdrv_truncate_co_entry(&tco); 3179 } else { 3180 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco); 3181 bdrv_coroutine_enter(child->bs, co); 3182 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE); 3183 } 3184 3185 return tco.ret; 3186 } 3187
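/*
 * An illustrative caller of the synchronous wrapper above (a sketch with an
 * assumed BdrvChild *child and new_size, not code from this file):
 *
 *     Error *local_err = NULL;
 *     int ret = bdrv_truncate(child, new_size, PREALLOC_MODE_OFF, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */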