/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_HWVERSION         "hwversion"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512

enum BdrvTrackedRequestType {
    BDRV_TRACKED_READ,
    BDRV_TRACKED_WRITE,
    BDRV_TRACKED_FLUSH,
    BDRV_TRACKED_IOCTL,
    BDRV_TRACKED_DISCARD,
};

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    enum BdrvTrackedRequestType type;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;
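
/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): serialising requests use [overlap_offset, overlap_offset +
 * overlap_bytes) to decide whether two requests conflict.  Two half-open
 * byte ranges intersect iff each one starts before the other one ends.
 *
 *     static bool example_ranges_overlap(const BdrvTrackedRequest *req,
 *                                        int64_t offset, unsigned int bytes)
 *     {
 *         return offset < req->overlap_offset + req->overlap_bytes &&
 *                req->overlap_offset < offset + bytes;
 *     }
 */
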
struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* For snapshots, block filters like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options,
                                Error **errp);
    /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
     * this field set to true, except ones that are defined only by their
     * child's bs.
     * An example of the latter type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    void (*bdrv_close)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);

    /* aio */
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
        int64_t offset, int count,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev_flags)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
    int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
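
    /*
     * Illustrative sketch (hypothetical driver function, not a member of
     * this struct): a format driver's .bdrv_co_preadv implementation
     * typically translates the guest-visible offset and then forwards the
     * request to its protocol child via bdrv_co_preadv(), declared later in
     * this header.  example_translate() is an assumed name.
     *
     *     static int coroutine_fn example_co_preadv(BlockDriverState *bs,
     *         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
     *     {
     *         uint64_t host_offset = example_translate(bs, offset);
     *
     *         return bdrv_co_preadv(bs->file, host_offset, bytes, qiov, flags);
     *     }
     */
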
    /*
     * Efficiently zero a region of the disk image.  Typically an image format
     * would use a compact metadata representation to implement this.  This
     * function pointer may be NULL or return -ENOTSUP and .bdrv_co_writev()
     * will be called instead.
     */
    int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
        int64_t offset, int count, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
        int64_t offset, int count);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum,
        BlockDriverState **file);

    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);
    int (*bdrv_inactivate)(BlockDriverState *bs);

    /*
     * Flushes all data for all layers by calling bdrv_co_flush for underlying
     * layers, if needed.  This function is needed for deterministic
     * synchronization of the flush finishing callback.
     */
    int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS.  The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
                                          QEMUIOVector *qiov,
                                          int64_t pos);
    int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,
                                          QEMUIOVector *qiov,
                                          int64_t pos);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;
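
    /*
     * Illustrative sketch (hypothetical list, not taken from a real driver):
     * create_opts is usually a statically initialized QemuOptsList that
     * reuses the BLOCK_OPT_* names defined at the top of this header.
     *
     *     static QemuOptsList example_create_opts = {
     *         .name = "example-create-opts",
     *         .head = QTAILQ_HEAD_INITIALIZER(example_create_opts.head),
     *         .desc = {
     *             {
     *                 .name = BLOCK_OPT_SIZE,
     *                 .type = QEMU_OPT_SIZE,
     *                 .help = "Virtual disk size"
     *             },
     *             { 0 }  // end of list
     *         },
     *     };
     */
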
    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
                                 const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
                                        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use.  Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again.  Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors).
     * On success, store them in @geo and return 0.
     * On failure, return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    /**
     * Drain and stop any internal sources of requests in the driver, and
     * remain so until the next I/O callback (e.g. bdrv_co_writev) is called.
     */
    void (*bdrv_drain)(BlockDriverState *bs);

    void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
                           Error **errp);
    void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
                           Error **errp);

    QLIST_ENTRY(BlockDriver) list;
};
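
/*
 * Illustrative sketch (hypothetical driver, assumed callback and state
 * names): a minimal driver only fills in the callbacks it supports and
 * leaves the rest NULL; registration normally happens through
 * bdrv_register() from a block_init() constructor.
 *
 *     static BlockDriver bdrv_example = {
 *         .format_name     = "example",
 *         .instance_size   = sizeof(BDRVExampleState),
 *         .bdrv_probe      = example_probe,
 *         .bdrv_open       = example_open,
 *         .bdrv_close      = example_close,
 *         .bdrv_co_preadv  = example_co_preadv,
 *         .bdrv_co_pwritev = example_co_pwritev,
 *         .bdrv_getlength  = example_getlength,
 *     };
 *
 *     static void bdrv_example_init(void)
 *     {
 *         bdrv_register(&bdrv_example);
 *     }
 *
 *     block_init(bdrv_example_init);
 */
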
typedef struct BlockLimits {
    /* Alignment requirement, in bytes, for offset/length of I/O
     * requests.  Must be a power of 2 less than INT_MAX; defaults to
     * 1 for drivers with modern byte interfaces, and to 512
     * otherwise. */
    uint32_t request_alignment;

    /* Maximum number of bytes that can be discarded at once (since it
     * is signed, it must be < 2G, if set).  Must be a multiple of
     * pdiscard_alignment, but need not be a power of 2.  May be 0 if
     * no inherent 32-bit limit. */
    int32_t max_pdiscard;

    /* Optimal alignment for discard requests in bytes.  A power of 2
     * is best but not mandatory.  Must be a multiple of
     * bl.request_alignment, and must be less than max_pdiscard if
     * that is set.  May be 0 if bl.request_alignment is good enough. */
    uint32_t pdiscard_alignment;

    /* Maximum number of bytes that can be zeroized at once (since it
     * is signed, it must be < 2G, if set).  Must be a multiple of
     * pwrite_zeroes_alignment.  May be 0 if no inherent 32-bit limit. */
    int32_t max_pwrite_zeroes;

    /* Optimal alignment for write zeroes requests in bytes.  A power
     * of 2 is best but not mandatory.  Must be a multiple of
     * bl.request_alignment, and must be less than max_pwrite_zeroes
     * if that is set.  May be 0 if bl.request_alignment is good
     * enough. */
    uint32_t pwrite_zeroes_alignment;

    /* Optimal transfer length in bytes.  A power of 2 is best but not
     * mandatory.  Must be a multiple of bl.request_alignment, or 0 if
     * no preferred size. */
    uint32_t opt_transfer;

    /* Maximal transfer length in bytes.  Need not be a power of 2, but
     * must be a multiple of opt_transfer and bl.request_alignment, or 0
     * for no 32-bit limit.  For now, anything larger than INT_MAX is
     * clamped down. */
    uint32_t max_transfer;

    /* Memory alignment, in bytes, so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* Memory alignment, in bytes, for bounce buffer */
    size_t opt_mem_alignment;

    /* Maximum number of iovec elements */
    int max_iov;
} BlockLimits;
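
/*
 * Illustrative sketch (hypothetical helper): honouring request_alignment
 * usually means widening a byte range so that both ends fall on aligned
 * boundaries before the request is passed to the driver.
 *
 *     static void example_align_range(const BlockLimits *bl,
 *                                     int64_t *offset, unsigned int *bytes)
 *     {
 *         uint32_t align = bl->request_alignment;  // power of 2 by contract
 *         int64_t start = QEMU_ALIGN_DOWN(*offset, align);
 *         int64_t end = QEMU_ALIGN_UP(*offset + *bytes, align);
 *
 *         *offset = start;
 *         *bytes = end - start;
 *     }
 */
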
typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;
    bool deleted;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

struct BdrvChildRole {
    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);

    void (*change_media)(BdrvChild *child, bool load);
    void (*resize)(BdrvChild *child);

    /* Returns a name that is supposedly more useful for human users than the
     * node name for identifying the node in question (in particular, a BB
     * name), or NULL if the parent can't provide a better name. */
    const char *(*get_name)(BdrvChild *child);

    /*
     * If this pair of functions is implemented, the parent doesn't issue new
     * requests after returning from .drained_begin() until .drained_end() is
     * called.
     *
     * Note that this can be nested.  If drained_begin() was called twice, new
     * I/O is allowed only after drained_end() was called twice, too.
     */
    void (*drained_begin)(BdrvChild *child);
    void (*drained_end)(BdrvChild *child);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;

struct BdrvChild {
    BlockDriverState *bs;
    char *name;
    const BdrvChildRole *role;
    void *opaque;
    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};

/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int open_flags; /* flags used to open the file, re-used for re-open */
    bool read_only; /* if true, the media is read only */
    bool encrypted; /* if true, the media is encrypted */
    bool valid_key; /* if true, a valid encryption key has been set */
    bool sg;        /* if true, the device is a /dev/sg* */
    bool probed;    /* if true, format was probed rather than specified */

    int copy_on_read; /* if nonzero, copy read backing sectors into image.
                         note this is a reference count */

    CoQueue flush_queue;                  /* Serializing flush queue */
    BdrvTrackedRequest *active_flush_req; /* Flush request in flight */
    unsigned int write_gen;               /* Current data generation */
    unsigned int flushed_gen;             /* Flushed write generation */

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
    bool walking_aio_notifiers; /* to make removal during iteration safe */

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non zero, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;
    BdrvChild *file;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* Offset after the highest byte written to */
    uint64_t wr_highest_offset;

    /* I/O Limits */
    BlockLimits bl;

    /* Flags honored during pwrite (so far: BDRV_REQ_FUA) */
    unsigned int supported_write_flags;
    /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
     * BDRV_REQ_MAY_UNMAP) */
    unsigned int supported_zero_flags;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of all BlockDriverStates (all_bdrv_states) */
    QTAILQ_ENTRY(BlockDriverState) bs_list;
    /* element of the list of monitor-owned BDS */
    QTAILQ_ENTRY(BlockDriverState) monitor_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
    int refcnt;

    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults).  This is always
     * a parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* Threshold limit for writes, in bytes ("high water mark"). */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */
    unsigned io_plugged;
    unsigned io_plug_disabled;

    int quiesce_counter;
};
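
/*
 * Illustrative sketch (hypothetical helper names): copy_on_read is a
 * reference count rather than a flag, so every enable must be balanced by a
 * disable, and the feature is only really off once the count drops back to
 * zero.
 *
 *     static void example_enable_copy_on_read(BlockDriverState *bs)
 *     {
 *         bs->copy_on_read++;
 *     }
 *
 *     static void example_disable_copy_on_read(BlockDriverState *bs)
 *     {
 *         assert(bs->copy_on_read > 0);
 *         bs->copy_on_read--;
 *     }
 */
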
struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;
};

typedef enum BlockMirrorBackingMode {
    /* Reuse the existing backing chain from the source for the target.
     * - sync=full: Set backing BDS to NULL.
     * - sync=top:  Use source's backing BDS.
     * - sync=none: Use source as the backing BDS. */
    MIRROR_SOURCE_BACKING_CHAIN,

    /* Open the target's backing chain completely anew */
    MIRROR_OPEN_BACKING_CHAIN,

    /* Do not change the target's backing BDS after job completion */
    MIRROR_LEAVE_BACKING_CHAIN,
} BlockMirrorBackingMode;

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}


/* Essential block drivers which must always be statically linked into qemu,
 * and which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);


/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);
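
/*
 * Illustrative usage sketch (callback name is hypothetical; what the void
 * pointer argument carries is an assumption based on current callers and
 * should be verified): the notifier's callback returns 0 to let the write
 * proceed or a negative errno to fail it.
 *
 *     static int example_before_write(NotifierWithReturn *notifier,
 *                                     void *opaque)
 *     {
 *         return 0;  // do not interfere with the request
 *     }
 *
 *     static NotifierWithReturn example_notifier = {
 *         .notify = example_before_write,
 *     };
 *
 *     bdrv_add_before_write_notifier(bs, &example_notifier);
 */
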
/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext.  This is only needed by block drivers that manage their
 * own children.  Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext.  This is only needed by block drivers that manage their own
 * children.  Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to be always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding
 * the association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to
 * a new AioContext; detach_aio_context() is called before the target BDS is
 * being detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext.  The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif
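
/*
 * Illustrative usage sketch (job structure and callback names are
 * hypothetical): a long-running job that follows a BDS between AioContexts
 * registers a callback pair and later unregisters exactly the same pair.
 *
 *     static void example_attached(AioContext *new_context, void *opaque)
 *     {
 *         ExampleJob *job = opaque;
 *         job->ctx = new_context;  // resume submitting work here
 *     }
 *
 *     static void example_detach(void *opaque)
 *     {
 *         ExampleJob *job = opaque;
 *         job->ctx = NULL;         // stop submitting work for now
 *     }
 *
 *     bdrv_add_aio_context_notifier(bs, example_attached, example_detach,
 *                                   job);
 *     ...
 *     bdrv_remove_aio_context_notifier(bs, example_attached, example_detach,
 *                                      job);
 */
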
/**
 * stream_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @backing_file_str: The file name that will be written to @bs as the
 * new backing file if the job completes.  Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs.  Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs.  At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @backing_file_str in the written image and to @base in the live
 * BlockDriverState.
 */
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb, void *opaque, Error **errp);
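
/*
 * Illustrative usage sketch (node variables, file name, callback and opaque
 * pointer are assumptions): starting a stream job that flattens everything
 * above base.qcow2 into the active layer.
 *
 *     Error *local_err = NULL;
 *
 *     stream_start("stream0", bs, base_bs, "base.qcow2", 0,
 *                  BLOCKDEV_ON_ERROR_REPORT, stream_done_cb, opaque,
 *                  &local_err);
 *     if (local_err) {
 *         error_propagate(errp, local_err);
 *         return;
 *     }
 */
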
/**
 * commit_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay.
 * @errp: Error object.
 *
 */
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp);
/**
 * commit_active_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 * @auto_complete: Auto complete the job.
 *
 */
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp, bool auto_complete);
/*
 * mirror_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @backing_mode: How to establish the target's backing chain after completion.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs.  Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed.  At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/*
 * backup_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @compress: Whether to compress data written to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 * @errp: Error object.
 *
 * Start a backup operation on @bs.  Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp);

void hmp_drive_add_node(Monitor *mon, const char *optstr);

BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
                                  const char *child_name,
                                  const BdrvChildRole *child_role,
                                  void *opaque);
void bdrv_root_unref_child(BdrvChild *child);

const char *bdrv_get_parent_name(const BlockDriverState *bs);
void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
bool blk_dev_has_tray(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int64_t nr_sect);
bool bdrv_requests_pending(BlockDriverState *bs);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);

void blockdev_close_all_bdrv_states(void);

#endif /* BLOCK_INT_H */