/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "block/throttle-groups.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_COMPAT6          4
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512
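
/*
 * Example (illustrative sketch, not taken from any existing driver; the
 * "mydrv" identifiers are hypothetical): format drivers typically reuse the
 * BLOCK_OPT_* names above in their .create_opts list so that image creation
 * options stay consistent across formats.  The list is terminated by an
 * entry whose name is NULL.
 *
 *   static QemuOptsList mydrv_create_opts = {
 *       .name = "mydrv-create-opts",
 *       .head = QTAILQ_HEAD_INITIALIZER(mydrv_create_opts.head),
 *       .desc = {
 *           {
 *               .name = BLOCK_OPT_SIZE,
 *               .type = QEMU_OPT_SIZE,
 *               .help = "Virtual disk size"
 *           },
 *           {
 *               .name = BLOCK_OPT_BACKING_FILE,
 *               .type = QEMU_OPT_STRING,
 *               .help = "File name of a base image"
 *           },
 *           { .name = NULL }
 *       },
 *   };
 */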

enum BdrvTrackedRequestType {
    BDRV_TRACKED_READ,
    BDRV_TRACKED_WRITE,
    BDRV_TRACKED_FLUSH,
    BDRV_TRACKED_IOCTL,
    BDRV_TRACKED_DISCARD,
};

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    enum BdrvTrackedRequestType type;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* For snapshots, block filters like Quorum can implement the following
     * recursive callback.  Its purpose is to recurse on the filter children
     * while calling bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers not implementing bdrv_parse_filename or bdrv_open should have
     * this field set to true, except for drivers that are defined only by
     * their child's bs.
     * An example of the latter type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors);
    int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors);
    void (*bdrv_close)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);

    /* aio */
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    /*
     * Efficiently zero a region of the disk image.  Typically an image format
     * would use a compact metadata representation to implement this.  This
     * function pointer may be NULL and .bdrv_co_writev() will be called
     * instead.
     */
    int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum,
        BlockDriverState **file);

    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);
    int (*bdrv_inactivate)(BlockDriverState *bs);

    /*
     * Flushes all data for all layers by calling bdrv_co_flush for underlying
     * layers, if needed. This function is needed for deterministic
     * synchronization of the flush finishing callback.
     */
    int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov,
                             int64_t pos);
    int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;

    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
        const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use.  Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again.  Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);
    void (*bdrv_flush_io_queue)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors).
     * On success, store them in @geo and return 0.
     * On failure, return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    /**
     * Drain and stop any internal sources of requests in the driver, and
     * remain so until the next I/O callback (e.g. bdrv_co_writev) is called.
     */
    void (*bdrv_drain)(BlockDriverState *bs);

    QLIST_ENTRY(BlockDriver) list;
};
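
/*
 * Example (illustrative sketch only; every "mydrv" identifier is
 * hypothetical): a format driver fills in the callbacks it supports and
 * registers its BlockDriver with bdrv_register() from a block_init()
 * constructor (see qemu/module.h).  Unimplemented callbacks stay NULL and
 * are handled by the generic fallbacks where available.
 *
 *   static BlockDriver bdrv_mydrv = {
 *       .format_name    = "mydrv",
 *       .instance_size  = sizeof(BDRVMydrvState),
 *       .bdrv_probe     = mydrv_probe,
 *       .bdrv_open      = mydrv_open,
 *       .bdrv_close     = mydrv_close,
 *       .bdrv_co_readv  = mydrv_co_readv,
 *       .bdrv_co_writev = mydrv_co_writev,
 *       .bdrv_getlength = mydrv_getlength,
 *       .create_opts    = &mydrv_create_opts,
 *   };
 *
 *   static void bdrv_mydrv_init(void)
 *   {
 *       bdrv_register(&bdrv_mydrv);
 *   }
 *   block_init(bdrv_mydrv_init);
 */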

typedef struct BlockLimits {
    /* maximum number of sectors that can be discarded at once */
    int max_discard;

    /* optimal alignment for discard requests in sectors */
    int64_t discard_alignment;

    /* maximum number of sectors that can be zeroed at once */
    int max_write_zeroes;

    /* optimal alignment for write zeroes requests in sectors */
    int64_t write_zeroes_alignment;

    /* optimal transfer length in sectors */
    int opt_transfer_length;

    /* maximal transfer length in sectors */
    int max_transfer_length;

    /* memory alignment so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment for bounce buffer */
    size_t opt_mem_alignment;

    /* maximum number of iovec elements */
    int max_iov;
} BlockLimits;

typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

struct BdrvChildRole {
    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;
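
/*
 * Example (illustrative sketch, not one of the roles declared above): an
 * .inherit_options implementation derives the child's open flags and options
 * from the parent's, typically by masking out flags that must not propagate
 * to that kind of child:
 *
 *   static void my_inherit_options(int *child_flags, QDict *child_options,
 *                                  int parent_flags, QDict *parent_options)
 *   {
 *       *child_flags = parent_flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
 *   }
 */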

struct BdrvChild {
    BlockDriverState *bs;
    char *name;
    const BdrvChildRole *role;
    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};

/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int read_only; /* if true, the media is read only */
    int open_flags; /* flags used to open the file, re-used for re-open */
    int encrypted; /* if true, the media is encrypted */
    int valid_key; /* if true, a valid encryption key has been set */
    int sg;        /* if true, the device is a /dev/sg* */
    int copy_on_read; /* if true, copy read backing sectors into image;
                         note this is a reference count */
    bool probed;

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    BlockBackend *blk;          /* owning backend, if any */

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non-empty, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;
    BdrvChild *file;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* I/O throttling.
     * throttle_state tells us if this BDS has I/O limits configured.
     * io_limits_enabled tells us if they are currently being enforced, but
     * it can be temporarily set to false */
    CoQueue throttled_reqs[2];
    bool io_limits_enabled;
    /* The following fields are protected by the ThrottleGroup lock.
     * See the ThrottleGroup documentation for details. */
    ThrottleState *throttle_state;
    ThrottleTimers throttle_timers;
    unsigned pending_reqs[2];
    QLIST_ENTRY(BlockDriverState) round_robin;

    /* Offset after the highest byte written to */
    uint64_t wr_highest_offset;

    /* I/O Limits */
    BlockLimits bl;

    /* Whether the device produces zeros when reading beyond EOF */
    bool zero_beyond_eof;

    /* Alignment requirement for offset/length of I/O requests */
    unsigned int request_alignment;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of all BlockDriverStates (all_bdrv_states) */
    QTAILQ_ENTRY(BlockDriverState) bs_list;
    /* element of the list of monitor-owned BDS */
    QTAILQ_ENTRY(BlockDriverState) monitor_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
    int refcnt;

    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always a
     * parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* threshold limit for writes, in bytes. "High water mark". */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    int quiesce_counter;
};

struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;

    char *throttle_group;
    ThrottleState *throttle_state;
};

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}
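
/*
 * Example (illustrative only): because backing_bs() returns NULL once there
 * is no backing child, the entire backing chain can be walked with a simple
 * loop, e.g. to count its depth:
 *
 *   int depth = 0;
 *   BlockDriverState *p;
 *
 *   for (p = bs; p != NULL; p = backing_bs(p)) {
 *       depth++;
 *   }
 */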

/* Essential block drivers which must always be statically linked into qemu, and
 * which therefore can be accessed without using bdrv_find_format() */

extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

/**
 * bdrv_setup_io_funcs:
 *
 * Prepare a #BlockDriver for I/O request processing by populating
 * unimplemented coroutine and AIO interfaces with generic wrapper functions
 * that fall back to implemented interfaces.
 */
void bdrv_setup_io_funcs(BlockDriver *bdrv);

int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);

void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg);

/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);

/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext.  This is only needed by block drivers that manage their
 * own children.  Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext.  This is only needed by block drivers that manage their own
 * children.  Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding
 * the association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to a
 * new AioContext; detach_aio_context() is called before the target BDS is
 * detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext.  The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);
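
/*
 * Example (illustrative sketch; the job type and callback bodies are
 * hypothetical): a long-running job that must follow the BDS between
 * AioContexts registers a notifier pair and later removes it with exactly
 * the same function and opaque pointers:
 *
 *   static void job_attached_aio_context(AioContext *new_context,
 *                                        void *opaque)
 *   {
 *       MyJob *job = opaque;
 *       (resume the job's timers and fd handlers in new_context)
 *   }
 *
 *   static void job_detach_aio_context(void *opaque)
 *   {
 *       MyJob *job = opaque;
 *       (quiesce the job's activity in the old context)
 *   }
 *
 *   bdrv_add_aio_context_notifier(bs, job_attached_aio_context,
 *                                 job_detach_aio_context, job);
 *
 *   bdrv_remove_aio_context_notifier(bs, job_attached_aio_context,
 *                                    job_detach_aio_context, job);
 */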

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @base_id: The file name that will be written to @bs as the new
 * backing file if the job completes.  Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs.  Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs.  At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @base_id in the written image and to @base in the live BlockDriverState.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/**
 * commit_start:
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay.
 * @errp: Error object.
 */
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp);
/**
 * commit_active_start:
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 */
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp);
/*
 * mirror_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done.  Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs.  Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed.  At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);
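
/*
 * Example (illustrative only; error handling omitted): mirror all data of
 * @bs to @target with a 64 KB dirty-bitmap granularity and no rate limit.
 * my_mirror_cb, my_opaque and local_err are placeholders supplied by the
 * caller.
 *
 *   mirror_start(bs, target, NULL, 0, 65536, 16 * 1024 * 1024,
 *                MIRROR_SYNC_MODE_FULL,
 *                BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                true, my_mirror_cb, my_opaque, &local_err);
 */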

/*
 * backup_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 * @errp: Error object.
 *
 * Start a backup operation on @bs.  Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp);

void hmp_drive_add_node(Monitor *mon, const char *optstr);

BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
                                  const char *child_name,
                                  const BdrvChildRole *child_role);
void bdrv_root_unref_child(BdrvChild *child);

void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
bool blk_dev_has_tray(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);
void blk_dev_resize_cb(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
bool bdrv_requests_pending(BlockDriverState *bs);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);

void blockdev_close_all_bdrv_states(void);

#endif /* BLOCK_INT_H */