/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "block/aio-wait.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/stats64.h"
#include "qemu/timer.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_ENCRYPT_FORMAT    "encrypt.format"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_HWVERSION         "hwversion"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512
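
/*
 * Illustrative note (not part of the API): the BLOCK_OPT_* strings above are
 * the option names that format drivers list in their create_opts, so they are
 * what users pass at image creation time, e.g. (assuming the qcow2 driver):
 *
 *     qemu-img create -f qcow2 -o cluster_size=65536,lazy_refcounts=on \
 *         disk.qcow2 16G
 */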

enum BdrvTrackedRequestType {
    BDRV_TRACKED_READ,
    BDRV_TRACKED_WRITE,
    BDRV_TRACKED_DISCARD,
    BDRV_TRACKED_TRUNCATE,
};

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    enum BdrvTrackedRequestType type;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* Set to true if the BlockDriver is a block filter. Block filters pass
     * certain callbacks that refer to data (see block.c) to their bs->file if
     * the driver doesn't implement them. Drivers that do not wish to forward
     * must implement them and return -ENOTSUP.
     */
    bool is_filter;
    /* For snapshots, block filters like Quorum can implement the following
     * recursive callback. Its purpose is to recurse on the filter's children
     * while calling bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options,
                                Error **errp);
    /* Drivers that implement neither bdrv_parse_filename nor bdrv_open should
     * have this field set to true, except for those that are defined only by
     * their child's bs.
     * An example of the latter type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);

    /* Protocol drivers should implement this instead of bdrv_open */
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    void (*bdrv_close)(BlockDriverState *bs);
    int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts,
                                       Error **errp);
    int coroutine_fn (*bdrv_co_create_opts)(const char *filename,
                                            QemuOpts *opts,
                                            Error **errp);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);

    /* aio */
    BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
        int64_t offset, int bytes,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);

    /**
     * @offset: position in bytes to read at
     * @bytes: number of bytes to read
     * @qiov: the buffers to fill with read data
     * @flags: currently unused, always 0
     *
     * @offset and @bytes will be a multiple of 'request_alignment',
     * but the length of individual @qiov elements does not have to
     * be a multiple.
     *
     * @bytes will always equal the total size of @qiov, and will be
     * no larger than 'max_transfer'.
     *
     * The buffer in @qiov may point directly to guest memory.
     */
    int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
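
    /*
     * Illustrative sketch (hypothetical helper, not part of this header): a
     * simple pass-through filter would typically implement .bdrv_co_preadv
     * above by forwarding the request to its bs->file child:
     *
     *     static int coroutine_fn hypo_co_preadv(BlockDriverState *bs,
     *         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
     *     {
     *         return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
     *     }
     */
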
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
    /**
     * @offset: position in bytes to write at
     * @bytes: number of bytes to write
     * @qiov: the buffers containing data to write
     * @flags: zero or more bits allowed by 'supported_write_flags'
     *
     * @offset and @bytes will be a multiple of 'request_alignment',
     * but the length of individual @qiov elements does not have to
     * be a multiple.
     *
     * @bytes will always equal the total size of @qiov, and will be
     * no larger than 'max_transfer'.
     *
     * The buffer in @qiov may point directly to guest memory.
     */
    int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);

    /*
     * Efficiently zero a region of the disk image. Typically an image format
     * would use a compact metadata representation to implement this. This
     * function pointer may be NULL or return -ENOTSUP, in which case
     * .bdrv_co_writev() will be called instead.
     */
    int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
        int64_t offset, int bytes, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
        int64_t offset, int bytes);

    /* Map [offset, offset + nbytes) range onto a child of @bs to copy from,
     * and invoke bdrv_co_copy_range_from(child, ...), or invoke
     * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
     *
     * See the comment of bdrv_co_copy_range for the parameter and return value
     * semantics.
     */
    int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs,
                                                BdrvChild *src,
                                                uint64_t offset,
                                                BdrvChild *dst,
                                                uint64_t dst_offset,
                                                uint64_t bytes,
                                                BdrvRequestFlags flags);

    /* Map [offset, offset + nbytes) range onto a child of @bs to copy data to,
     * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
     * operation if @bs is the leaf and @src has the same BlockDriver. Return
     * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
     *
     * See the comment of bdrv_co_copy_range for the parameter and return value
     * semantics.
     */
    int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs,
                                              BdrvChild *src,
                                              uint64_t src_offset,
                                              BdrvChild *dst,
                                              uint64_t dst_offset,
                                              uint64_t bytes,
                                              BdrvRequestFlags flags);

    /*
     * Building block for bdrv_block_status[_above] and
     * bdrv_is_allocated[_above]. The driver should answer only
     * according to the current layer, and should only need to set
     * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
     * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
     * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
     * block.h for the overall meaning of the bits. As a hint, the
     * flag want_zero is true if the caller cares more about precise
     * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
     * overall allocation (favor larger *pnum, perhaps by reporting
     * _DATA instead of _ZERO). The block layer guarantees input
     * clamped to bdrv_getlength() and aligned to request_alignment,
     * as well as non-NULL pnum, map, and file; in turn, the driver
     * must return an error or set pnum to an aligned non-zero value.
     */
    int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
        bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
        int64_t *map, BlockDriverState **file);
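
    /*
     * Illustrative sketch (hypothetical driver): a driver whose data lives 1:1
     * in its bs->file child could answer a block-status query roughly as:
     *
     *     static int coroutine_fn hypo_co_block_status(BlockDriverState *bs,
     *         bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
     *         int64_t *map, BlockDriverState **file)
     *     {
     *         *pnum = bytes;
     *         *map = offset;
     *         *file = bs->file->bs;
     *         return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
     *     }
     *
     * Compare bdrv_co_block_status_from_file(), declared near the end of this
     * header, which drivers can use directly for this case.
     */
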
    /*
     * Invalidate any cached meta-data.
     */
    void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs,
                                                  Error **errp);
    int (*bdrv_inactivate)(BlockDriverState *bs);

    /*
     * Flushes all data for all layers by calling bdrv_co_flush for underlying
     * layers, if needed. This function is needed for deterministic
     * synchronization of the flush finishing callback.
     */
    int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example file-posix.c calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    /*
     * Drivers setting this field must be able to work with just a plain
     * filename with '<protocol_name>:' as a prefix, and no other options.
     * Options may be extracted from the filename by implementing
     * bdrv_parse_filename.
     */
    const char *protocol_name;
    int coroutine_fn (*bdrv_co_truncate)(BlockDriverState *bs, int64_t offset,
                                         PreallocMode prealloc, Error **errp);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
    BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,
                                      Error **errp);

    int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
                                          QEMUIOVector *qiov,
                                          int64_t pos);
    int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,
                                          QEMUIOVector *qiov,
                                          int64_t pos);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
                                  unsigned long int req, void *buf,
                                  BlockCompletionFunc *cb, void *opaque);
    int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs,
                                      unsigned long int req, void *buf);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;
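
    /*
     * Illustrative sketch (hypothetical driver): a minimal create_opts list
     * could look roughly like this, using the BLOCK_OPT_* names defined at
     * the top of this header:
     *
     *     static QemuOptsList hypo_create_opts = {
     *         .name = "hypo-create-opts",
     *         .head = QTAILQ_HEAD_INITIALIZER(hypo_create_opts.head),
     *         .desc = {
     *             {
     *                 .name = BLOCK_OPT_SIZE,
     *                 .type = QEMU_OPT_SIZE,
     *                 .help = "Virtual disk size"
     *             },
     *             { 0 }    (end-of-list terminator)
     *         }
     *     };
     */
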
    /*
     * Returns 0 for a completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs,
                                      BdrvCheckResult *result,
                                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque,
                              Error **errp);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better to pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
                                 const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
                                        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use. Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again. Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors).
     * On success, store them in @geo and return 0.
     * On failure, return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    /**
     * bdrv_co_drain_begin is called, if implemented, at the beginning of a
     * drain operation to drain and stop any internal sources of requests in
     * the driver.
     * bdrv_co_drain_end is called, if implemented, at the end of the drain.
     *
     * They should be used by the driver to e.g. manage scheduled I/O
     * requests, or toggle an internal state. After the end of the drain, new
     * requests will continue normally.
     */
    void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
    void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);

    void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
                           Error **errp);
    void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
                           Error **errp);

    /**
     * Informs the block driver that a permission change is intended. The
     * driver checks whether the change is permissible and may take other
     * preparations for the change (e.g. get file system locks). This operation
     * is always followed by a call to either .bdrv_set_perm or
     * .bdrv_abort_perm_update.
     *
     * Checks whether the requested set of cumulative permissions in @perm
     * can be granted for accessing @bs and whether no other users are using
     * permissions other than those given in @shared (both arguments take
     * BLK_PERM_* bitmasks).
     *
     * If both conditions are met, 0 is returned. Otherwise, -errno is returned
     * and errp is set to an error describing the conflict.
     */
    int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
                           uint64_t shared, Error **errp);

    /**
     * Called to inform the driver that the cumulative set of used permissions
     * for @bs has changed to @perm, and the set of shareable permissions to
     * @shared. The driver can use this to propagate changes to its children
     * (i.e. request permissions only if a parent actually needs them).
     *
     * This function is only invoked after bdrv_check_perm(), so block drivers
     * may rely on preparations made in their .bdrv_check_perm implementation.
     */
    void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared);

    /*
     * Called to inform the driver that after a previous bdrv_check_perm()
     * call, the permission update is not performed and any preparations made
     * for it (e.g. taken file locks) need to be undone.
     *
     * This function can be called even for nodes that never saw a
     * bdrv_check_perm() call. It is a no-op then.
     */
    void (*bdrv_abort_perm_update)(BlockDriverState *bs);

    /**
     * Returns in @nperm and @nshared the permissions that the driver for @bs
     * needs on its child @c, based on the cumulative permissions requested by
     * the parents in @parent_perm and @parent_shared.
     *
     * If @c is NULL, return the permissions for attaching a new child for the
     * given @role.
     *
     * If @reopen_queue is non-NULL, don't return the currently needed
     * permissions, but those that will be needed after applying the
     * @reopen_queue.
     */
    void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c,
                            const BdrvChildRole *role,
                            BlockReopenQueue *reopen_queue,
                            uint64_t parent_perm, uint64_t parent_shared,
                            uint64_t *nperm, uint64_t *nshared);

    /**
     * Bitmaps should be marked as 'IN_USE' in the image when it is reopened
     * read-write. This handler should implement that, and on success it
     * should also clear the readonly field of the affected BlockDirtyBitmaps.
     */
    int (*bdrv_reopen_bitmaps_rw)(BlockDriverState *bs, Error **errp);
    bool (*bdrv_can_store_new_dirty_bitmap)(BlockDriverState *bs,
                                            const char *name,
                                            uint32_t granularity,
                                            Error **errp);
    void (*bdrv_remove_persistent_dirty_bitmap)(BlockDriverState *bs,
                                                const char *name,
                                                Error **errp);

    /**
     * Register/unregister a buffer for I/O. For example, when the driver is
     * interested in knowing the memory areas that will later be used in iovs,
     * so that it can do IOMMU mapping with VFIO etc., in order to get better
     * performance. In the case of VFIO drivers, this callback is used to do
     * DMA mapping for hot buffers.
     */
    void (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size);
    void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host);
    QLIST_ENTRY(BlockDriver) list;
};
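
/*
 * Illustrative sketch (hypothetical driver, assuming the usual registration
 * pattern): format drivers define a static BlockDriver and register it from a
 * block_init() constructor, roughly:
 *
 *     static BlockDriver bdrv_hypo = {
 *         .format_name     = "hypo",
 *         .instance_size   = sizeof(BDRVHypoState),
 *         .bdrv_open       = hypo_open,
 *         .bdrv_close      = hypo_close,
 *         .bdrv_co_preadv  = hypo_co_preadv,
 *         .bdrv_co_pwritev = hypo_co_pwritev,
 *         .create_opts     = &hypo_create_opts,
 *     };
 *
 *     static void bdrv_hypo_init(void)
 *     {
 *         bdrv_register(&bdrv_hypo);
 *     }
 *     block_init(bdrv_hypo_init);
 */
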
typedef struct BlockLimits {
    /* Alignment requirement, in bytes, for offset/length of I/O
     * requests. Must be a power of 2 less than INT_MAX; defaults to
     * 1 for drivers with modern byte interfaces, and to 512
     * otherwise. */
    uint32_t request_alignment;

    /* Maximum number of bytes that can be discarded at once (since it
     * is signed, it must be < 2G, if set). Must be a multiple of
     * pdiscard_alignment, but need not be a power of 2. May be 0 if no
     * inherent 32-bit limit */
    int32_t max_pdiscard;

    /* Optimal alignment for discard requests in bytes. A power of 2
     * is best but not mandatory. Must be a multiple of
     * bl.request_alignment, and must be less than max_pdiscard if
     * that is set. May be 0 if bl.request_alignment is good enough */
    uint32_t pdiscard_alignment;

    /* Maximum number of bytes that can be zeroized at once (since it is
     * signed, it must be < 2G, if set). Must be a multiple of
     * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
    int32_t max_pwrite_zeroes;

    /* Optimal alignment for write zeroes requests in bytes. A power
     * of 2 is best but not mandatory. Must be a multiple of
     * bl.request_alignment, and must be less than max_pwrite_zeroes
     * if that is set. May be 0 if bl.request_alignment is good
     * enough */
    uint32_t pwrite_zeroes_alignment;

    /* Optimal transfer length in bytes. A power of 2 is best but not
     * mandatory. Must be a multiple of bl.request_alignment, or 0 if
     * no preferred size */
    uint32_t opt_transfer;

    /* Maximum transfer length in bytes. Need not be a power of 2, but
     * must be a multiple of opt_transfer and bl.request_alignment, or 0
     * for no 32-bit limit. For now, anything larger than INT_MAX is
     * clamped down. */
    uint32_t max_transfer;

    /* memory alignment, in bytes, so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment, in bytes, for bounce buffer */
    size_t opt_mem_alignment;

    /* maximum number of iovec elements */
    int max_iov;
} BlockLimits;
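
/*
 * Illustrative sketch (hypothetical driver): limits are typically filled in
 * from the driver's .bdrv_refresh_limits callback, e.g.
 *
 *     static void hypo_refresh_limits(BlockDriverState *bs, Error **errp)
 *     {
 *         bs->bl.request_alignment = 512;
 *         bs->bl.max_transfer = 2 * 1024 * 1024;
 *         bs->bl.pwrite_zeroes_alignment = 64 * 1024;
 *     }
 *
 * The generic block layer uses these limits to split and align guest requests
 * before they reach the driver callbacks.
 */
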
typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;
    bool deleted;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

struct BdrvChildRole {
    /* If true, bdrv_replace_node() doesn't change the node this BdrvChild
     * points to. */
    bool stay_at_node;

    /* If true, the parent is a BlockDriverState and bdrv_next_all_states()
     * will return it. This information is used for drain_all, where every node
     * will be drained separately, so the drain only needs to be propagated to
     * non-BDS parents. */
    bool parent_is_bds;

    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);

    void (*change_media)(BdrvChild *child, bool load);
    void (*resize)(BdrvChild *child);

    /* Returns a name that is supposedly more useful for human users than the
     * node name for identifying the node in question (in particular, a BB
     * name), or NULL if the parent can't provide a better name. */
    const char *(*get_name)(BdrvChild *child);

    /* Returns a malloced string that describes the parent of the child for a
     * human reader. This could be a node-name, BlockBackend name, qdev ID or
     * QOM path of the device owning the BlockBackend, job type and ID etc. The
     * caller is responsible for freeing the memory. */
    char *(*get_parent_desc)(BdrvChild *child);

    /*
     * If this pair of functions is implemented, the parent doesn't issue new
     * requests after returning from .drained_begin() until .drained_end() is
     * called.
     *
     * Note that this can be nested. If drained_begin() was called twice, new
     * I/O is allowed only after drained_end() was called twice, too.
     */
    void (*drained_begin)(BdrvChild *child);
    void (*drained_end)(BdrvChild *child);

    /*
     * Returns whether the parent has pending requests for the child. This
     * callback is polled after .drained_begin() has been called until all
     * activity on the child has stopped.
     */
    bool (*drained_poll)(BdrvChild *child);

    /* Notifies the parent that the child has been activated/inactivated (e.g.
     * when migration is completing) and it can start/stop requesting
     * permissions and doing I/O on it. */
    void (*activate)(BdrvChild *child, Error **errp);
    int (*inactivate)(BdrvChild *child);

    void (*attach)(BdrvChild *child);
    void (*detach)(BdrvChild *child);

    /* Notifies the parent that the filename of its child has changed (e.g.
     * because the direct child was removed from the backing chain), so that it
     * can update its reference. */
    int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
                           const char *filename, Error **errp);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;
extern const BdrvChildRole child_backing;

struct BdrvChild {
    BlockDriverState *bs;
    char *name;
    const BdrvChildRole *role;
    void *opaque;

    /**
     * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
     */
    uint64_t perm;

    /**
     * Permissions that can still be granted to other users of @bs while this
     * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
     */
    uint64_t shared_perm;

    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};
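
/*
 * Illustrative note (hypothetical values): a format driver that needs to
 * write to its "file" child while tolerating other readers might end up
 * holding
 *
 *     perm        = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE
 *     shared_perm = BLK_PERM_ALL & ~(BLK_PERM_WRITE | BLK_PERM_RESIZE)
 *
 * on that BdrvChild. bdrv_child_try_set_perm(), declared later in this
 * header, is the usual way to change these values after the child has been
 * attached.
 */
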
/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    /* Protected by big QEMU lock or read-only after opening. No special
     * locking needed during I/O...
     */
    int open_flags; /* flags used to open the file, re-used for re-open */
    bool read_only; /* if true, the media is read only */
    bool encrypted; /* if true, the media is encrypted */
    bool sg;        /* if true, the device is a /dev/sg* */
    bool probed;    /* if true, format was probed rather than specified */
    bool force_share; /* if true, always allow all shared permissions */
    bool implicit;  /* if true, this filter node was automatically inserted */

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
    bool walking_aio_notifiers; /* to make removal during iteration safe */

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non-empty, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-empty and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;
    BdrvChild *file;

    /* I/O Limits */
    BlockLimits bl;

    /* Flags honored during pwrite (so far: BDRV_REQ_FUA,
     * BDRV_REQ_WRITE_UNCHANGED).
     * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
     * writes will be issued as normal writes without the flag set.
     * This is important to note for drivers that do not explicitly
     * request a WRITE permission for their children and instead take
     * the same permissions as their parent did (this is commonly what
     * block filters do). Such drivers have to be aware that the
     * parent may have taken a WRITE_UNCHANGED permission only and is
     * issuing such requests. Drivers either must make sure that
     * these requests do not result in plain WRITE accesses (usually
     * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
     * every incoming write request as-is, including potentially that
     * flag), or they have to explicitly take the WRITE permission for
     * their children. */
    unsigned int supported_write_flags;
    /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
     * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) */
    unsigned int supported_zero_flags;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of all BlockDriverStates (all_bdrv_states) */
    QTAILQ_ENTRY(BlockDriverState) bs_list;
    /* element of the list of monitor-owned BDS */
    QTAILQ_ENTRY(BlockDriverState) monitor_list;
    int refcnt;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always a
     * parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* Protected by AioContext lock */

    /* If we are reading a disk image, give its size in sectors.
     * Generally read-only; it is written to by load_snapshot and
     * save_snapshot, but the block layer is quiescent during those.
     */
    int64_t total_sectors;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* threshold limit for writes, in bytes. "High water mark". */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    /* Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
     * Reading from the list can be done with either the BQL or the
     * dirty_bitmap_mutex. Modifying a bitmap only requires
     * dirty_bitmap_mutex. */
    QemuMutex dirty_bitmap_mutex;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;

    /* Offset after the highest byte written to */
    Stat64 wr_highest_offset;

    /* If non-zero, copy read backing sectors into image. Can be >1 if more
     * than one client has requested copy-on-read. Accessed with atomic
     * ops.
     */
    int copy_on_read;

    /* number of in-flight requests; overall and serialising.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
    unsigned int serialising_in_flight;

    /* Kicked to signal main loop when a request completes. */
    AioWait wait;

    /* counter for nested bdrv_io_plug.
     * Accessed with atomic ops.
     */
    unsigned io_plugged;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* Accessed with atomic ops. */
    int quiesce_counter;
    int recursive_quiesce_counter;

    unsigned int write_gen; /* Current data generation */

    /* Protected by reqs_lock. */
    CoMutex reqs_lock;
    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
    CoQueue flush_queue;   /* Serializing flush queue */
    bool active_flush_req; /* Flush request in flight? */

    /* Only read/written by whoever has set active_flush_req to true. */
    unsigned int flushed_gen; /* Flushed write generation */
};

struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;
};

typedef enum BlockMirrorBackingMode {
    /* Reuse the existing backing chain from the source for the target.
     * - sync=full: Set backing BDS to NULL.
     * - sync=top: Use source's backing BDS.
     * - sync=none: Use source as the backing BDS. */
    MIRROR_SOURCE_BACKING_CHAIN,

    /* Open the target's backing chain completely anew */
    MIRROR_OPEN_BACKING_CHAIN,

    /* Do not change the target's backing BDS after job completion */
    MIRROR_LEAVE_BACKING_CHAIN,
} BlockMirrorBackingMode;

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}
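
/*
 * Illustrative sketch: backing_bs() is the usual way to walk down a backing
 * chain, e.g.
 *
 *     BlockDriverState *base = bs;
 *     while (backing_bs(base)) {
 *         base = backing_bs(base);
 *     }
 *
 * leaves @base pointing at the bottom-most image of the chain.
 */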

/* Essential block drivers which must always be statically linked into qemu,
 * and which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);

extern unsigned int bdrv_drain_all_count;
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);

void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
                                      QDict *options);


/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);

/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext. This is only needed by block drivers that manage their
 * own children. Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext. This is only needed by block drivers that manage their own
 * children. Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding
 * the association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to
 * a new AioContext; detach_aio_context() is called before the target BDS is
 * being detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext. The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);
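
/*
 * Illustrative sketch (hypothetical callbacks): a long-running user of a BDS
 * typically pairs the two calls above, e.g.
 *
 *     bdrv_add_aio_context_notifier(bs, hypo_attached_aio_context,
 *                                   hypo_detach_aio_context, opaque);
 *     ...
 *     bdrv_remove_aio_context_notifier(bs, hypo_attached_aio_context,
 *                                      hypo_detach_aio_context, opaque);
 *
 * with both callbacks implemented to move the caller's timers and fd handlers
 * to the new AioContext.
 */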

/**
 * bdrv_wakeup:
 * @bs: The BlockDriverState for which an I/O operation has been completed.
 *
 * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
 * synchronous I/O on a BlockDriverState that is attached to another
 * I/O thread, the main thread lets the I/O thread's event loop run,
 * waiting for the I/O operation to complete. A bdrv_wakeup will wake
 * up the main thread if necessary.
 *
 * Manual calls to bdrv_wakeup are rarely necessary, because
 * bdrv_dec_in_flight already calls it.
 */
void bdrv_wakeup(BlockDriverState *bs);

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @backing_file_str: The file name that will be written to @bs as the
 * new backing file if the job completes. Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs. Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs. At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @backing_file_str in the written image and to @base in the live
 * BlockDriverState.
 */
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  int64_t speed, BlockdevOnError on_error, Error **errp);

/**
 * commit_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @backing_file_str: String to use as the backing file in @top's overlay.
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the commit job inserts into the graph above @top. NULL means
 * that a node name should be autogenerated.
 * @errp: Error object.
 *
 */
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  const char *filter_node_name, Error **errp);
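
/*
 * Illustrative note (hypothetical chain): for both streaming and committing,
 * picture a backing chain such as
 *
 *     base <- intermediate <- top (active)
 *
 * stream_start() copies data from the images below into the active layer so
 * that the lower images can eventually be dropped, while commit_start() moves
 * data from @top down into @base so that the overlays above @base can be
 * dropped.
 */
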
/**
 * commit_active_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the commit job inserts into the graph above @bs. NULL means that
 * a node name should be autogenerated.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @auto_complete: Auto complete the job.
 * @errp: Error object.
 *
 */
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp);
/*
 * mirror_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @backing_mode: How to establish the target's backing chain after completion.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the mirror job inserts into the graph above @bs. NULL means that
 * a node name should be autogenerated.
 * @copy_mode: When to trigger writes to the target.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs. Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed. At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp);

/*
 * backup_job_create:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 *
 * Create a backup operation on @bs. Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                            BlockDriverState *target, int64_t speed,
                            MirrorSyncMode sync_mode,
                            BdrvDirtyBitmap *sync_bitmap,
                            bool compress,
                            BlockdevOnError on_source_error,
                            BlockdevOnError on_target_error,
                            int creation_flags,
                            BlockCompletionFunc *cb, void *opaque,
                            JobTxn *txn, Error **errp);

void hmp_drive_add_node(Monitor *mon, const char *optstr);

BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
                                  const char *child_name,
                                  const BdrvChildRole *child_role,
                                  uint64_t perm, uint64_t shared_perm,
                                  void *opaque, Error **errp);
void bdrv_root_unref_child(BdrvChild *child);

int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
                            Error **errp);

/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
 * block filters: Forward CONSISTENT_READ, WRITE, WRITE_UNCHANGED and RESIZE to
 * all children */
void bdrv_filter_default_perms(BlockDriverState *bs, BdrvChild *c,
                               const BdrvChildRole *role,
                               BlockReopenQueue *reopen_queue,
                               uint64_t perm, uint64_t shared,
                               uint64_t *nperm, uint64_t *nshared);

/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
 * (non-raw) image formats: Like above for bs->backing, but for bs->file it
 * requires WRITE | RESIZE for read-write images, always requires
 * CONSISTENT_READ and doesn't share WRITE. */
void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
                               const BdrvChildRole *role,
                               BlockReopenQueue *reopen_queue,
                               uint64_t perm, uint64_t shared,
                               uint64_t *nperm, uint64_t *nshared);

/*
 * Default implementation for drivers to pass bdrv_co_block_status() to
 * their file.
 */
int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
                                                bool want_zero,
                                                int64_t offset,
                                                int64_t bytes,
                                                int64_t *pnum,
                                                int64_t *map,
                                                BlockDriverState **file);
/*
 * Default implementation for drivers to pass bdrv_co_block_status() to
 * their backing file.
 */
int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file);
const char *bdrv_get_parent_name(const BlockDriverState *bs);
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
bool blk_dev_has_removable_media(BlockBackend *blk);
bool blk_dev_has_tray(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);

void bdrv_inc_in_flight(BlockDriverState *bs);
void bdrv_dec_in_flight(BlockDriverState *bs);

void blockdev_close_all_bdrv_states(void);

int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         uint64_t bytes, BdrvRequestFlags flags);
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       uint64_t bytes, BdrvRequestFlags flags);

int refresh_total_sectors(BlockDriverState *bs, int64_t hint);

#endif /* BLOCK_INT_H */