/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "trace.h"
#include "sysemu/qtest.h"
#include "block/blockjob.h"
#include "block/block_int.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
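
/* Example (illustrative sketch only, not code from this file): a management
 * caller such as blockdev.c would typically enable throttling before
 * configuring it, e.g. with hypothetical limit values in 'cfg':
 *
 *     ThrottleConfig cfg = { ... };      // populate bps/iops limits
 *     bdrv_io_limits_enable(bs);         // installs the throttle timers
 *     bdrv_set_io_limits(bs, &cfg);      // applies cfg, kicks queued requests
 *     ...
 *     bdrv_io_limits_disable(bs);        // drains throttled reqs, frees state
 *
 * The enable/set ordering matters, as noted above bdrv_io_limits_enable().
 */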

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;
    }
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  clock_type,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

/* This function makes an I/O request wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue the next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

static bool bdrv_drain_one(BlockDriverState *bs)
{
    bool bs_busy;

    bdrv_flush_io_queue(bs);
    bdrv_start_throttled_reqs(bs);
    bs_busy = bdrv_requests_pending(bs);
    bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
    return bs_busy;
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree
 *
 * See the warning in bdrv_drain_all(). This function can only be called if
 * you are sure nothing can generate I/O because you have op blockers
 * installed.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void bdrv_drain(BlockDriverState *bs)
{
    while (bdrv_drain_one(bs)) {
        /* Keep iterating */
    }
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        aio_context_release(aio_context);
    }

    while (busy) {
        busy = false;
        bs = NULL;

        while ((bs = bdrv_next(bs))) {
            AioContext *aio_context = bdrv_get_aio_context(bs);

            aio_context_acquire(aio_context);
            busy |= bdrv_drain_one(bs);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                                 - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
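
/* Worked example (assuming a hypothetical 64 KiB cluster size, i.e. 128
 * sectors of 512 bytes): a request for sectors [130, 140) is widened to the
 * enclosing cluster range:
 *
 *     int64_t cs;
 *     int cn;
 *     bdrv_round_to_clusters(bs, 130, 10, &cs, &cn);
 *     // cs == 128 (QEMU_ALIGN_DOWN(130, 128))
 *     // cn == 128 (QEMU_ALIGN_UP(130 - 128 + 10, 128))
 *
 * If the driver reports no cluster size, the request is passed through
 * unchanged.
 */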

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
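
/* Usage sketch (illustrative only): the synchronous helpers above block the
 * caller until completion, so they are meant for setup and management paths
 * rather than guest I/O.  For example, zeroing the first MiB of an image,
 * with a hypothetical 'fail' label:
 *
 *     if (bdrv_write_zeroes(bs, 0, 2048, BDRV_REQ_MAY_UNMAP) < 0) {
 *         goto fail;
 *     }
 *
 * 2048 sectors * 512 bytes = 1 MiB; the flag is only honoured if the image
 * was opened with BDRV_O_UNMAP (see bdrv_co_write_zeroes() below).
 */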

/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
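
/* Usage sketch (illustrative only, with a hypothetical 512-byte header):
 * byte-granularity metadata updates typically pair bdrv_pread() with
 * bdrv_pwrite_sync() so the update is not reordered against later writes:
 *
 *     uint8_t header[512];
 *     if (bdrv_pread(bs->file, 0, header, sizeof(header)) < 0) {
 *         goto fail;
 *     }
 *     ... modify header ...
 *     if (bdrv_pwrite_sync(bs->file, 0, header, sizeof(header)) < 0) {
 *         goto fail;
 *     }
 *
 * Note that bdrv_pread() returns the byte count on success, not 0.
 */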

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating clusters in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                             BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}

static inline uint64_t bdrv_get_align(BlockDriverState *bs)
{
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    return MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
}

static inline bool bdrv_req_is_aligned(BlockDriverState *bs,
                                       int64_t offset, size_t bytes)
{
    int64_t align = bdrv_get_align(bs);
    return !(offset & (align - 1) || (bytes & (align - 1)));
}

/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bdrv_get_align(bs);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
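
/* Worked example (assuming a hypothetical request_alignment of 4096 bytes):
 * a read of 512 bytes at offset 4608 is not aligned, so bdrv_co_do_preadv()
 * pads it on both sides before calling bdrv_aligned_preadv():
 *
 *     head padding: 4608 & 4095 = 512 bytes  -> offset becomes 4096
 *     new length:   512 + 512 = 1024 bytes, rounded up to 4096 bytes
 *     tail padding: 4096 - 1024 = 3072 bytes
 *
 * The caller's qiov is wrapped in local_qiov together with head_buf and
 * tail_buf, and only the requested 512 bytes end up in the caller's buffer.
 */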

static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}

#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}

/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    uint64_t align = bdrv_get_align(bs);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base = head_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base = tail_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    if (use_local_qiov) {
        /* Local buffer may have non-zero data. */
        flags &= ~BDRV_REQ_ZERO_WRITE;
    }
    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);

    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    int ret;

    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }
    if (bdrv_req_is_aligned(bs, sector_num << BDRV_SECTOR_BITS,
                            nb_sectors << BDRV_SECTOR_BITS)) {
        ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                                BDRV_REQ_ZERO_WRITE | flags);
    } else {
        uint8_t *buf;
        QEMUIOVector local_qiov;
        size_t bytes = nb_sectors << BDRV_SECTOR_BITS;

        buf = qemu_memalign(bdrv_opt_mem_align(bs), bytes);
        memset(buf, 0, bytes);
        qemu_iovec_init(&local_qiov, 1);
        qemu_iovec_add(&local_qiov, buf, bytes);

        ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, &local_qiov,
                                BDRV_REQ_ZERO_WRITE | flags);
        qemu_vfree(buf);
    }
    return ret;
}

int bdrv_flush_all(void)
{
    BlockDriverState *bs = NULL;
    int result = 0;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int file_pnum;

        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
                                         data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
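
/* Usage sketch (illustrative only): callers normally loop, because *pnum only
 * covers the first run of sectors sharing the same allocation state ('total'
 * is a hypothetical sector count, e.g. from bdrv_nb_sectors(); large ranges
 * should also be clamped to BDRV_REQUEST_MAX_SECTORS as bdrv_make_zero()
 * does):
 *
 *     int64_t sector = 0;
 *     while (sector < total) {
 *         int n;
 *         int64_t st = bdrv_get_block_status(bs, sector, total - sector, &n);
 *         if (st < 0) {
 *             break;                      // error
 *         }
 *         if (st & BDRV_BLOCK_ZERO) {
 *             // this range reads as zeroes
 *         }
 *         sector += n;
 *     }
 */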

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_load_vmstate) {
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    }
    if (bs->file) {
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    }
    return -ENOTSUP;
}

/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
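
/* Usage sketch (illustrative only): the AIO entry points return immediately
 * and invoke the callback from the BlockDriverState's AioContext once the
 * request completes.  A hypothetical caller that waits for completion:
 *
 *     static void my_read_cb(void *opaque, int ret)   // hypothetical
 *     {
 *         *(int *)opaque = ret;
 *     }
 *
 *     int ret = -EINPROGRESS;
 *     bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors, my_read_cb, &ret);
 *     while (ret == -EINPROGRESS) {
 *         aio_poll(bdrv_get_aio_context(bs), true);
 *     }
 *
 * This mirrors the wait loops used by the synchronous wrappers above.
 */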

typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
                            int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                            reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                                  reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);

    return outidx + 1;
}

/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
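
/* Usage sketch (illustrative only, with hypothetical callbacks cb0/cb1 and
 * opaques op0/op1): a device emulation such as virtio-blk batches guest
 * writes roughly like this:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = cb0, .opaque = op0 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = cb1, .opaque = op1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // reqs[i].error != 0 means that request failed up front and
 *         // will get no callback
 *     }
 *
 * Exactly sequential requests like these may be merged into a single driver
 * call by multiwrite_merge() before submission.
 */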

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockAIOCBSync {
    BlockAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockAIOCBSync;

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size = sizeof(BlockAIOCBSync),
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockAIOCBSync *acb = opaque;

    if (!acb->is_write && acb->ret >= 0) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_unref(acb);
}

static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov,
                                      int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque,
                                      int is_write)
{
    BlockAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_try_blockalign(bs, qiov->size);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (acb->bounce == NULL) {
        acb->ret = -ENOMEM;
    } else if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}


typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
    QEMUBH *bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_flush(bs, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_slice_free1(acb->aiocb_info->aiocb_size, acb);
    }
}
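
/*
 * Illustrative sketch (kept as a comment, not compiled) of how a block driver
 * typically uses the AIOCB helpers above.  The struct and variable names are
 * hypothetical; only AIOCBInfo, qemu_aio_get() and qemu_aio_unref() are real.
 *
 *     typedef struct MyDriverAIOCB {
 *         BlockAIOCB common;     // placed first, as in the AIOCB types above
 *         int ret;
 *     } MyDriverAIOCB;
 *
 *     static const AIOCBInfo my_driver_aiocb_info = {
 *         .aiocb_size = sizeof(MyDriverAIOCB),
 *     };
 *
 *     // submission: allocate with refcnt == 1
 *     MyDriverAIOCB *acb = qemu_aio_get(&my_driver_aiocb_info, bs, cb, opaque);
 *
 *     // completion: invoke the callback exactly once, then drop the reference
 *     acb->common.cb(acb->common.opaque, acb->ret);
 *     qemu_aio_unref(acb);
 *
 * bdrv_aio_cancel() takes an extra reference with qemu_aio_ref() so the AIOCB
 * stays valid while it polls for completion, and drops it again afterwards.
 */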

/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* Pass the clipped chunk size num, matching the bdrv_co_discard
             * branch above, rather than the full remaining nb_sectors. */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
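
/*
 * Worked example for the clipping logic in bdrv_co_discard() above, with
 * purely illustrative numbers: assume bs->bl.discard_alignment == 8 and a
 * request with sector_num == 10, nb_sectors == 20.
 *
 *   1st iteration: num starts at 20, is clipped to the alignment (8) and then
 *       reduced by 10 % 8 == 2, so num == 6.  Sectors 10..15 are discarded
 *       and the next chunk starts at sector 16, which is aligned.
 *   2nd iteration: sector 16 is aligned, so only the max_discard limit
 *       applies; the remaining 14 sectors go out as a single chunk.
 */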

/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_plug) {
        drv->bdrv_io_plug(bs);
    } else if (bs->file) {
        bdrv_io_plug(bs->file);
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_unplug) {
        drv->bdrv_io_unplug(bs);
    } else if (bs->file) {
        bdrv_io_unplug(bs->file);
    }
}

void bdrv_flush_io_queue(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_flush_io_queue) {
        drv->bdrv_flush_io_queue(bs);
    } else if (bs->file) {
        bdrv_flush_io_queue(bs->file);
    }
}
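
/*
 * Illustrative sketch (kept as a comment, not compiled): allocating an I/O
 * buffer that satisfies bdrv_qiov_is_aligned().  qemu_iovec_init(),
 * qemu_iovec_add() and qemu_iovec_destroy() are the usual QEMUIOVector
 * helpers from the utility code; the buffer length is assumed to be a
 * multiple of the optimal memory alignment.
 *
 *     size_t len = 64 * 1024;
 *     uint8_t *buf = qemu_try_blockalign0(bs, len);
 *     QEMUIOVector qiov;
 *
 *     if (buf) {
 *         qemu_iovec_init(&qiov, 1);
 *         qemu_iovec_add(&qiov, buf, len);
 *         assert(bdrv_qiov_is_aligned(bs, &qiov));
 *         ... submit the request, e.g. through one of the aio helpers ...
 *         qemu_iovec_destroy(&qiov);
 *         qemu_vfree(buf);
 *     }
 */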