/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_HWVERSION         "hwversion"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512

enum BdrvTrackedRequestType {
    BDRV_TRACKED_READ,
    BDRV_TRACKED_WRITE,
    BDRV_TRACKED_FLUSH,
    BDRV_TRACKED_IOCTL,
    BDRV_TRACKED_DISCARD,
};

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    enum BdrvTrackedRequestType type;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;
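/*
 * Rough lifecycle of a tracked request, as a sketch only (the real code and
 * the helper names below live in block/io.c and are internal to it):
 *
 *     BdrvTrackedRequest req;
 *
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
 *     mark_request_serialising(&req, align);   // only when the request must
 *     wait_serialising_requests(&req);         // not overlap with others
 *     ... perform the actual I/O ...
 *     tracked_request_end(&req);               // wakes req.wait_queue
 */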
struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* for snapshots, block filters like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers that implement neither bdrv_parse_filename nor bdrv_open should
     * have this field set to true, except for those that are defined only by
     * their child's bs.
     * An example of the latter type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    void (*bdrv_close)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);

    /* aio */
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev_flags)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
    int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
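    /*
     * Example (sketch only; the "hypo_" names are illustrative, not part of
     * QEMU): a filter driver that merely forwards reads to its file child
     * could implement .bdrv_co_preadv along these lines:
     *
     *     static int coroutine_fn hypo_co_preadv(BlockDriverState *bs,
     *         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
     *     {
     *         return bdrv_co_preadv(bs->file->bs, offset, bytes, qiov, flags);
     *     }
     */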
    /*
     * Efficiently zero a region of the disk image. Typically an image format
     * would use a compact metadata representation to implement this. This
     * function pointer may be NULL or return -ENOTSUP, in which case
     * .bdrv_co_writev() will be called instead.
     */
    int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum,
        BlockDriverState **file);

    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);
    int (*bdrv_inactivate)(BlockDriverState *bs);

    /*
     * Flushes all data for all layers by calling bdrv_co_flush for underlying
     * layers, if needed. This function is needed for deterministic
     * synchronization of the flush finishing callback.
     */
    int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the QEMU
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov,
                             int64_t pos);
    int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
                                  unsigned long int req, void *buf,
                                  BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;
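    /*
     * Example (sketch only; the "hypo_" names are illustrative): a typical
     * create_opts definition built on the BLOCK_OPT_* keys above looks like
     *
     *     static QemuOptsList hypo_create_opts = {
     *         .name = "hypo-create-opts",
     *         .head = QTAILQ_HEAD_INITIALIZER(hypo_create_opts.head),
     *         .desc = {
     *             {
     *                 .name = BLOCK_OPT_SIZE,
     *                 .type = QEMU_OPT_SIZE,
     *                 .help = "Virtual disk size"
     *             },
     *             { .name = NULL }    // end of list
     *         }
     *     };
     */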
    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
                                 const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
                                        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use. Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again. Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors).
     * On success, store them in @geo and return 0.
     * On failure, return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    /**
     * Drain and stop any internal sources of requests in the driver, and
     * remain so until the next I/O callback (e.g. bdrv_co_writev) is called.
     */
    void (*bdrv_drain)(BlockDriverState *bs);

    void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
                           Error **errp);
    void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
                           Error **errp);

    QLIST_ENTRY(BlockDriver) list;
};
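/*
 * Example (sketch only; the "hypo" names and callbacks are illustrative, not
 * part of QEMU): a format or protocol driver fills in a BlockDriver and
 * registers it from a module init hook:
 *
 *     static BlockDriver bdrv_hypo = {
 *         .format_name    = "hypo",
 *         .instance_size  = sizeof(BDRVHypoState),
 *         .bdrv_open      = hypo_open,
 *         .bdrv_close     = hypo_close,
 *         .bdrv_co_preadv = hypo_co_preadv,
 *         .bdrv_getlength = hypo_getlength,
 *         .create_opts    = &hypo_create_opts,
 *     };
 *
 *     static void bdrv_hypo_init(void)
 *     {
 *         bdrv_register(&bdrv_hypo);
 *     }
 *     block_init(bdrv_hypo_init);
 */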
typedef struct BlockLimits {
    /* maximum number of sectors that can be discarded at once */
    int max_discard;

    /* optimal alignment for discard requests in sectors */
    int64_t discard_alignment;

    /* maximum number of sectors that can be zeroized at once */
    int max_write_zeroes;

    /* optimal alignment for write zeroes requests in sectors */
    int64_t write_zeroes_alignment;

    /* optimal transfer length in sectors */
    int opt_transfer_length;

    /* maximum transfer length in sectors */
    int max_transfer_length;

    /* memory alignment so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment for bounce buffer */
    size_t opt_mem_alignment;

    /* maximum number of iovec elements */
    int max_iov;
} BlockLimits;

typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

struct BdrvChildRole {
    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);

    void (*change_media)(BdrvChild *child, bool load);
    void (*resize)(BdrvChild *child);

    /* Returns a name that is supposedly more useful for human users than the
     * node name for identifying the node in question (in particular, a BB
     * name), or NULL if the parent can't provide a better name. */
    const char *(*get_name)(BdrvChild *child);

    /*
     * If this pair of functions is implemented, the parent doesn't issue new
     * requests after returning from .drained_begin() until .drained_end() is
     * called.
     *
     * Note that this can be nested. If drained_begin() was called twice, new
     * I/O is allowed only after drained_end() was called twice, too.
     */
    void (*drained_begin)(BdrvChild *child);
    void (*drained_end)(BdrvChild *child);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;

struct BdrvChild {
    BlockDriverState *bs;
    char *name;
    const BdrvChildRole *role;
    void *opaque;
    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};
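/*
 * Example (sketch): the children/parents lists in BlockDriverState below are
 * plain QLISTs linked through the two entries above, so walking a node's
 * children looks like
 *
 *     BdrvChild *child;
 *     QLIST_FOREACH(child, &bs->children, next) {
 *         ... child->bs is the child node, child->role describes the edge ...
 *     }
 *
 * (use next_parent with bs->parents to walk in the other direction).
 */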
/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int read_only; /* if true, the media is read only */
    int open_flags; /* flags used to open the file, re-used for re-open */
    int encrypted; /* if true, the media is encrypted */
    int valid_key; /* if true, a valid encryption key has been set */
    int sg;        /* if true, the device is a /dev/sg* */
    int copy_on_read; /* if true, copy read backing sectors into image;
                         note this is a reference count */
    bool probed;

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non-empty, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-empty and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;
    BdrvChild *file;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* Offset after the highest byte written to */
    uint64_t wr_highest_offset;

    /* I/O Limits */
    BlockLimits bl;

    /* Whether the image produces zeros when reading beyond EOF */
    bool zero_beyond_eof;

    /* Alignment requirement for offset/length of I/O requests */
    unsigned int request_alignment;
    /* Flags honored during pwrite (so far: BDRV_REQ_FUA) */
    unsigned int supported_write_flags;
    /* Flags honored during write_zeroes (so far: BDRV_REQ_FUA,
     * BDRV_REQ_MAY_UNMAP) */
    unsigned int supported_zero_flags;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of all BlockDriverStates (all_bdrv_states) */
    QTAILQ_ENTRY(BlockDriverState) bs_list;
    /* element of the list of monitor-owned BDS */
    QTAILQ_ENTRY(BlockDriverState) monitor_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
    int refcnt;

    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always a
     * parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* threshold limit for writes, in bytes ("high water mark") */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;
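    /*
     * The two write-threshold fields above back the block-set-write-threshold
     * QMP command; they are normally manipulated through the helpers in
     * block/write-threshold.h rather than directly, e.g. (sketch):
     *
     *     bdrv_write_threshold_set(bs, threshold_bytes);
     *     ...
     *     current = bdrv_write_threshold_get(bs);   // 0 means "not set"
     */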
    /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */
    unsigned io_plugged;
    unsigned io_plug_disabled;

    int quiesce_counter;
};

struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;
};

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}


/* Essential block drivers which must always be statically linked into qemu, and
 * which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

/**
 * bdrv_setup_io_funcs:
 *
 * Prepare a #BlockDriver for I/O request processing by populating
 * unimplemented coroutine and AIO interfaces with generic wrapper functions
 * that fall back to implemented interfaces.
 */
void bdrv_setup_io_funcs(BlockDriver *bdrv);

int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);


/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);

/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext. This is only needed by block drivers that manage their
 * own children. Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext. This is only needed by block drivers that manage their own
 * children. Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to be always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding the
 * association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to a
 * new AioContext; detach_aio_context() is called before the target BDS is being
 * detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);
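/*
 * Example (sketch; the callback names are illustrative): a long-running user
 * of a BDS pairs the add/remove calls around its own lifetime:
 *
 *     bdrv_add_aio_context_notifier(bs, job_attached_aio_context,
 *                                   job_detach_aio_context, job);
 *     ...
 *     bdrv_remove_aio_context_notifier(bs, job_attached_aio_context,
 *                                      job_detach_aio_context, job);
 */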
/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext. The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @base_id: The file name that will be written to @bs as the new
 * backing file if the job completes. Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs. Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs. At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @base_id in the written image and to @base in the live BlockDriverState.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/**
 * commit_start:
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay.
 * @errp: Error object.
 */
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp);
/**
 * commit_active_start:
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 */
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp);
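/*
 * Example (sketch only; error handling omitted and the completion callback
 * name is illustrative): the job starters in this header are normally invoked
 * from the QMP/HMP handlers in blockdev.c, roughly like
 *
 *     Error *local_err = NULL;
 *
 *     stream_start(bs, base, "base.qcow2", 0, BLOCKDEV_ON_ERROR_REPORT,
 *                  stream_complete_cb, bs, &local_err);
 */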
/*
 * mirror_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs. Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed. At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/*
 * backup_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 * @errp: Error object.
 *
 * Start a backup operation on @bs. Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp);

void hmp_drive_add_node(Monitor *mon, const char *optstr);

BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
                                  const char *child_name,
                                  const BdrvChildRole *child_role,
                                  void *opaque);
void bdrv_root_unref_child(BdrvChild *child);

const char *bdrv_get_parent_name(const BlockDriverState *bs);
void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
bool blk_dev_has_tray(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
bool bdrv_requests_pending(BlockDriverState *bs);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);

void blockdev_close_all_bdrv_states(void);

#endif /* BLOCK_INT_H */