1 /* 2 * QEMU System Emulator block driver 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "block/throttle-groups.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

/* Legacy image-creation flag bits; new code should prefer the named
 * BLOCK_OPT_* creation options below. */
#define BLOCK_FLAG_ENCRYPT 1
#define BLOCK_FLAG_COMPAT6 4
#define BLOCK_FLAG_LAZY_REFCOUNTS 8

/* Well-known option names for image creation (keys of the QemuOpts passed
 * to .bdrv_create()) */
#define BLOCK_OPT_SIZE "size"
#define BLOCK_OPT_ENCRYPT "encryption"
#define BLOCK_OPT_COMPAT6 "compat6"
#define BLOCK_OPT_BACKING_FILE "backing_file"
#define BLOCK_OPT_BACKING_FMT "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
#define BLOCK_OPT_TABLE_SIZE "table_size"
#define BLOCK_OPT_PREALLOC "preallocation"
#define BLOCK_OPT_SUBFMT "subformat"
#define BLOCK_OPT_COMPAT_LEVEL "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE "adapter_type"
#define BLOCK_OPT_REDUNDANCY "redundancy"
#define BLOCK_OPT_NOCOW "nocow"
#define BLOCK_OPT_OBJECT_SIZE "object_size"
#define BLOCK_OPT_REFCOUNT_BITS "refcount_bits"

/* Size of the buffer handed to .bdrv_probe() for format detection */
#define BLOCK_PROBE_BUF_SIZE        512

/* Bookkeeping for one in-flight request on a BlockDriverState; used for
 * overlap/serialisation checks between concurrent requests. */
typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;           /* start of the request, in bytes */
    unsigned int bytes;       /* length of the request, in bytes */
    bool is_write;

    bool serialising;         /* if true, overlapping requests must wait */
    int64_t overlap_offset;   /* range used for overlap checks; may be wider
                               * than offset/bytes */
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

/*
 * Interface that every block driver (format or protocol) implements.
 * Callbacks that a driver does not support are left NULL; generic fallbacks
 * are installed by bdrv_setup_io_funcs() where available.
 */
struct BlockDriver {
    const char *format_name;  /* name used to look up the driver */
    int instance_size;        /* size of the driver's per-BDS opaque state */

    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* for snapshots block filter like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation look in the future Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    /* Format/device detection; see BLOCK_PROBE_BUF_SIZE for the buffer
     * handed to bdrv_probe */
    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
     * this field set to true, except ones that are defined only by their
     * child's bs.
     * An example of the last type will be the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    /* Synchronous, sector-based read/write */
    int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors);
    int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors);
    void (*bdrv_close)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs);

    /* aio */
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);

    /* Coroutine-based I/O; must only be called from coroutine context */
    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    /*
     * Efficiently zero a region of the disk image.  Typically an image format
     * would use a compact metadata representation to implement this.  This
     * function pointer may be NULL and .bdrv_co_writev() will be called
     * instead.
     */
    int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum);

    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    const char *protocol_name;  /* non-NULL for protocol drivers */
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors);

    /* Internal (VM) snapshot support */
    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    /* VM state (migration data) storage inside the image */
    int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov,
                             int64_t pos);
    int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    int (*bdrv_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf);
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;

    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState* bs, BdrvCheckResult *result,
        BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkDebugEvent event);

    /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
        const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use.  Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again.  Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);
    void (*bdrv_flush_io_queue)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors)
     * On success, store them in @geo and return 0.
     * On failure return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    QLIST_ENTRY(BlockDriver) list;
};

/* Limits a driver advertises for request sizes and alignments; filled in
 * by .bdrv_refresh_limits() */
typedef struct BlockLimits {
    /* maximum number of sectors that can be discarded at once */
    int max_discard;

    /* optimal alignment for discard requests in sectors */
    int64_t discard_alignment;

    /* maximum number of sectors that can be zeroized at once */
    int max_write_zeroes;

    /* optimal alignment for write zeroes requests in sectors */
    int64_t write_zeroes_alignment;

    /* optimal transfer length in sectors */
    int opt_transfer_length;

    /* maximal transfer length in sectors */
    int max_transfer_length;

    /* memory alignment so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment for bounce buffer */
    size_t opt_mem_alignment;
} BlockLimits;

typedef struct BdrvOpBlocker BdrvOpBlocker;

/* Callbacks registered via bdrv_add_aio_context_notifier(); invoked when
 * the BDS is moved to a different AioContext */
typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

/* Role a child node plays for its parent (e.g. file vs. format layer);
 * currently only controls how open flags are inherited */
struct BdrvChildRole {
    int (*inherit_flags)(int parent_flags);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;

/* An edge in the block graph: a parent's reference to one child BDS */
struct BdrvChild {
    BlockDriverState *bs;
    const BdrvChildRole *role;
    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};

/*
 * Note: the function
 * bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int read_only; /* if true, the media is read only */
    int open_flags; /* flags used to open the file, re-used for re-open */
    int encrypted; /* if true, the media is encrypted */
    int valid_key; /* if true, a valid encryption key has been set */
    int sg;        /* if true, the device is a /dev/sg* */
    int copy_on_read; /* if true, copy read backing sectors into image
                         note this is a reference count */
    bool probed;

    BlockDriver *drv; /* NULL means no media */
    void *opaque;     /* driver-private state, instance_size bytes */

    BlockBackend *blk;          /* owning backend, if any */

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non zero, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;  /* backing image child, if any */
    BdrvChild *file;     /* protocol-level child, if any */

    NotifierList close_notifiers;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* I/O throttling.
     * throttle_state tells us if this BDS has I/O limits configured.
     * io_limits_enabled tells us if they are currently being
     * enforced, but it can be temporarily set to false */
    CoQueue throttled_reqs[2];
    bool io_limits_enabled;
    /* The following fields are protected by the ThrottleGroup lock.
     * See the ThrottleGroup documentation for details. */
    ThrottleState *throttle_state;
    ThrottleTimers throttle_timers;
    unsigned pending_reqs[2];
    QLIST_ENTRY(BlockDriverState) round_robin;

    /* Offset after the highest byte written to */
    uint64_t wr_highest_offset;

    /* I/O Limits */
    BlockLimits bl;

    /* Whether produces zeros when read beyond eof */
    bool zero_beyond_eof;

    /* Alignment requirement for offset/length of I/O requests */
    unsigned int request_alignment;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of "drives" the guest sees */
    QTAILQ_ENTRY(BlockDriverState) device_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
    int refcnt;

    /* in-flight requests; see BdrvTrackedRequest */
    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always
     * a parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* threshold limit for writes, in bytes. "High water mark". */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    int quiesce_counter;
};

/* Per-BlockBackend settings for its root node (open flags, read-only state,
 * zero detection and throttling configuration) */
struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;

    char *throttle_group;
    ThrottleState *throttle_state;
};

/* Return the BDS that backs @bs, or NULL if @bs has no backing child */
static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}


/* Essential block drivers which must always be statically linked into qemu, and
 * which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

extern QTAILQ_HEAD(BdrvStates, BlockDriverState) bdrv_states;

/**
 * bdrv_setup_io_funcs:
 *
 * Prepare a #BlockDriver for I/O request processing by populating
 * unimplemented coroutine and AIO interfaces with generic wrapper functions
 * that fall back to implemented interfaces.
 */
void bdrv_setup_io_funcs(BlockDriver *bdrv);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);

void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg);


/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);

/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext.  This is only needed by block drivers that manage their
 * own children.  Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext.  This is only needed by block drivers that manage their own
 * children.  Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to be always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding
 * the association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to
 * a new AioContext; detach_aio_context() is called before the target BDS is
 * being detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext.  The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @base_id: The file name that will be written to @bs as the new
 * backing file if the job completes. Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs. Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs. At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @base_id in the written image and to @base in the live BlockDriverState.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/**
 * commit_start:
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay
 * @errp: Error object.
 *
 */
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp);
/**
 * commit_active_start:
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 */
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp);
/*
 * mirror_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs. Clusters that are allocated
 * in @bs will be written to @bs until the job is cancelled or
 * manually completed. At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/*
 * backup_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the
 *             destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a backup operation on @bs. Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  Error **errp);

void blk_set_bs(BlockBackend *blk, BlockDriverState *bs);

/* Hooks from the block layer into the device model attached to @blk */
void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);
void blk_dev_resize_cb(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
bool bdrv_requests_pending(BlockDriverState *bs);

#endif /* BLOCK_INT_H */