#ifndef BLOCK_H
#define BLOCK_H

#include "block/aio.h"
#include "qapi/qapi-types-block-core.h"
#include "block/aio-wait.h"
#include "qemu/iov.h"
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
#include "block/blockjob.h"
#include "qemu/hbitmap.h"

/* block.c */
typedef struct BlockDriver BlockDriver;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildRole BdrvChildRole;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if unallocated blocks read back as zeroes. This is equivalent
     * to the LBPRZ flag in the SCSI logical block provisioning page.
     */
    bool unallocated_blocks_are_zero;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

typedef enum {
    BDRV_REQ_COPY_ON_READ       = 0x1,
    BDRV_REQ_ZERO_WRITE         = 0x2,

    /*
     * The BDRV_REQ_MAY_UNMAP flag is used to indicate that the block driver
     * is allowed to optimize a write zeroes request by unmapping (discarding)
     * blocks if it is guaranteed that the result will read back as
     * zeroes. The flag is only passed to the driver if the block device is
     * opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP          = 0x4,

    /*
     * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means that
     * we don't want wait_serialising_requests() during the read operation.
     *
     * This flag is used for backup copy-on-write operations, when we need to
     * read old data before write (write notifier triggered). It is okay since
     * we already waited for other serialising requests in the initiating write
     * (see bdrv_aligned_pwritev), and it is necessary if the initiating write
     * is already serialising (without the flag, the read would deadlock
     * waiting for the serialising write to complete).
     */
    BDRV_REQ_NO_SERIALISING     = 0x8,
    BDRV_REQ_FUA                = 0x10,
    BDRV_REQ_WRITE_COMPRESSED   = 0x20,

    /*
     * Signifies that this write request will not change the visible disk
     * content.
     */
    BDRV_REQ_WRITE_UNCHANGED    = 0x40,

    /*
     * BDRV_REQ_SERIALISING forces request serialisation for writes.
     * It is used to ensure that writes to the backing file of a backup process
     * target cannot race with a read of the backup target that defers to the
     * backing file.
     *
     * Note that BDRV_REQ_SERIALISING is _not_ the opposite in meaning of
     * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
     * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
     */
    BDRV_REQ_SERIALISING        = 0x80,

    /* Mask of valid flags */
    BDRV_REQ_MASK               = 0xff,
} BdrvRequestFlags;
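/*
 * Example (illustrative sketch, not part of the API): a caller that wants a
 * range to read back as zeroes, and is happy for the driver to unmap it when
 * the image was opened with BDRV_O_UNMAP, can pass the request flags like
 * this.  'child' and 'len' are hypothetical; bdrv_pwrite_zeroes() is declared
 * further down in this header.
 *
 *     int ret = bdrv_pwrite_zeroes(child, 0, len, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zero write failed: %s", strerror(-ret));
 *     }
 */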
typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;

#define BDRV_O_RDWR         0x0002
#define BDRV_O_RESIZE       0x0004 /* request permission for resizing the node */
#define BDRV_O_SNAPSHOT     0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY    0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE      0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO   0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING   0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH     0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE     0x0800 /* consistency hint for migration handoff */
#define BDRV_O_CHECK        0x1000 /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR   0x2000 /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP        0x4000 /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL     0x8000 /* if no block driver is explicitly given:
                                      select an appropriate protocol driver,
                                      ignoring the format layer */
#define BDRV_O_NO_IO        0x10000 /* don't initialize for I/O */

#define BDRV_O_CACHE_MASK   (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)


/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY      "read-only"
#define BDRV_OPT_DISCARD        "discard"
#define BDRV_OPT_FORCE_SHARE    "force-share"


#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)

#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                     INT_MAX >> BDRV_SECTOR_BITS)
#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
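/*
 * Example (illustrative only): converting between byte offsets and 512-byte
 * sectors with the constants above; 'offset' is a hypothetical byte offset.
 *
 *     int64_t sector_num   = offset >> BDRV_SECTOR_BITS;
 *     bool    is_aligned   = (offset & (BDRV_SECTOR_SIZE - 1)) == 0;
 *     int64_t aligned_down = offset & BDRV_SECTOR_MASK;
 */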
/*
 * Allocation status flags for bdrv_block_status() and friends.
 *
 * Public flags:
 * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
 * BDRV_BLOCK_ZERO: offset reads as zero
 * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer rather than any backing, set by block layer
 * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
 *                 layer, set by block layer
 *
 * Internal flag:
 * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
 *                 that the block layer recompute the answer from the returned
 *                 BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
 * host offset within the returned BDS that is allocated for the
 * corresponding raw guest data.  However, whether that offset
 * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, returned file is zero at offset
 *  t    f        t       sectors read as valid from file at offset
 *  f    t        t       sectors preallocated, read as zero, returned file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        returned file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_EOF          0x20
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK
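/*
 * Example (illustrative sketch): querying the allocation status of a range
 * and interpreting the flags above.  bdrv_block_status() itself is declared
 * later in this header; 'bs', 'offset' and 'bytes' are hypothetical.
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
 *     if (ret >= 0 && (ret & BDRV_BLOCK_ZERO)) {
 *         // the first 'pnum' bytes of the range read back as zeroes
 *     }
 *     if (ret >= 0 && (ret & BDRV_BLOCK_OFFSET_VALID)) {
 *         // 'map' is the host offset of the data within 'file'
 *     }
 */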
typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;
    uint64_t perm, shared_perm;
    QDict *options;
    QDict *explicit_options;
    void *opaque;
} BDRVReopenState;

/*
 * Block operation types
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EJECT,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
    BLOCK_OP_TYPE_MAX,
} BlockOpType;

/* Block node permission constants */
enum {
    /**
     * A user that has the "permission" of consistent reads is guaranteed that
     * their view of the contents of the block device is complete and
     * self-consistent, representing the contents of a disk at a specific
     * point.
     *
     * For most block devices (including their backing files) this is true, but
     * the property cannot be maintained in a few situations like for
     * intermediate nodes of a commit block job.
     */
    BLK_PERM_CONSISTENT_READ    = 0x01,

    /** This permission is required to change the visible disk contents. */
    BLK_PERM_WRITE              = 0x02,

    /**
     * This permission (which is weaker than BLK_PERM_WRITE) is both
     * sufficient and required for writes to the block node when the caller
     * promises that the visible disk content doesn't change.
     *
     * As the BLK_PERM_WRITE permission is strictly stronger, either is
     * sufficient to perform an unchanging write.
     */
    BLK_PERM_WRITE_UNCHANGED    = 0x04,

    /** This permission is required to change the size of a block node. */
    BLK_PERM_RESIZE             = 0x08,

    /**
     * This permission is required to change the node that this BdrvChild
     * points to.
     */
    BLK_PERM_GRAPH_MOD          = 0x10,

    BLK_PERM_ALL                = 0x1f,

    DEFAULT_PERM_PASSTHROUGH    = BLK_PERM_CONSISTENT_READ
                                  | BLK_PERM_WRITE
                                  | BLK_PERM_WRITE_UNCHANGED
                                  | BLK_PERM_RESIZE,

    DEFAULT_PERM_UNCHANGED      = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
};

char *bdrv_perm_names(uint64_t perm);

/* disk I/O throttling */
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
bool bdrv_uses_whitelist(void);
int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp);
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
BlockDriverState *bdrv_new(void);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
                 Error **errp);
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
                       Error **errp);

int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
int bdrv_parse_discard_flags(const char *mode, int *flags);
BdrvChild *bdrv_open_child(const char *filename,
                           QDict *options, const char *bdref_key,
                           BlockDriverState *parent,
                           const BdrvChildRole *child_role,
                           bool allow_none, Error **errp);
BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
                         Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
                           const char *bdref_key, Error **errp);
BlockDriverState *bdrv_open(const char *filename, const char *reference,
                            QDict *options, int flags, Error **errp);
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
                                       int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs,
                                    QDict *options, int flags);
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue,
                         Error **errp);
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
                        BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors);
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors);
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count);
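/*
 * Example (illustrative sketch): byte-granularity I/O through a BdrvChild.
 * 'child' is a hypothetical BdrvChild * obtained e.g. from bdrv_open_child(),
 * and error handling is reduced to returning the error code.
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = bdrv_pwrite_sync(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */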
/*
 * Efficiently zero a region of the disk image.  Note that this is a regular
 * I/O request like read or write and should have a reasonable size.  This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file);
void bdrv_refresh_filename(BlockDriverState *bs);

int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
                                  PreallocMode prealloc, Error **errp);
int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
                  Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_change_backing_file(BlockDriverState *bs,
                             const char *backing_file, const char *backing_fmt);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
                           const char *backing_file_str);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);


typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
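/*
 * Example (illustrative sketch): running an image consistency check with the
 * API above and repairing what can be repaired; 'bs' is a hypothetical
 * BlockDriverState *.
 *
 *     BdrvCheckResult result = {0};
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS | BDRV_FIX_ERRORS);
 *     if (ret < 0) {
 *         // the check itself could not be run
 *     } else if (result.corruptions || result.check_errors) {
 *         // the image is still damaged after the repair attempt
 *     }
 */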
/*
 * The units of offset and total_work_size may be chosen arbitrarily by the
 * block driver; total_work_size may change during the course of the amendment
 * operation.
 */
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
                                      int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                       Error **errp);

/* external snapshots */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate);
bool bdrv_is_first_non_filter(BlockDriverState *candidate);

/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
                                        const char *node_name, Error **errp);

/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Invalidate any cached metadata used by image formats */
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
void bdrv_invalidate_cache_all(Error **errp);
int bdrv_inactivate_all(void);

/* Ensure contents are flushed to disk. */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);

/* Returns NULL when bs == NULL */
AioWait *bdrv_get_aio_wait(BlockDriverState *bs);

#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    AIO_WAIT_WHILE(bdrv_get_aio_wait(bs_),                 \
                   bdrv_get_aio_context(bs_),              \
                   cond); })

int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes);
int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum);

bool bdrv_is_read_only(BlockDriverState *bs);
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_set_read_only(BlockDriverState *bs, bool read_only, Error **errp);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp);
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
BlockDriverState *bdrv_next_node(BlockDriverState *bs);
BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);

typedef struct BdrvNextIterator {
    enum {
        BDRV_NEXT_BACKEND_ROOTS,
        BDRV_NEXT_MONITOR_OWNED,
    } phase;
    BlockBackend *blk;
    BlockDriverState *bs;
} BdrvNextIterator;

BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);
void bdrv_next_cleanup(BdrvNextIterator *it);

BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
bool bdrv_is_encrypted(BlockDriverState *bs);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
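/*
 * Example (illustrative sketch): iterating over the block nodes visible via
 * bdrv_first()/bdrv_next(); the loop body here only prints each node name.
 * bdrv_next_cleanup() exists for loops that terminate early.
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         printf("%s\n", bdrv_get_node_name(bs));
 *     }
 */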
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);

const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
void bdrv_get_full_backing_filename(BlockDriverState *bs,
                                    char *dest, size_t sz, Error **errp);
void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp);

int path_has_protocol(const char *path);
int path_is_absolute(const char *path);
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename);

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     bool quiet, Error **errp);

/*
 * Returns the alignment in bytes that is required so that no bounce buffer
 * is required throughout the stack.
 */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);

void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
                             BlockDriverState *child_bs,
                             const char *child_name,
                             const BdrvChildRole *child_role,
                             Error **errp);

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
bool bdrv_op_blocker_is_empty(BlockDriverState *bs);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag);
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);

/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);
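/*
 * Example (illustrative sketch): code running outside the node's iothread
 * commonly takes the node's AioContext around block-layer calls.  Whether a
 * particular caller needs this depends on its calling context; the snippet
 * only shows the acquire/release pairing around a synchronous call.
 *
 *     AioContext *ctx = bdrv_get_aio_context(bs);
 *
 *     aio_context_acquire(ctx);
 *     bdrv_flush(bs);
 *     aio_context_release(ctx);
 */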
/**
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);

/**
 * bdrv_set_aio_context:
 *
 * Changes the #AioContext used for fd handlers, timers, and BHs by this
 * BlockDriverState and all its children.
 *
 * This function must be called with the iothread lock held.
 */
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context);
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

/**
 * bdrv_parent_drained_begin:
 *
 * Begin a quiesced section of all users of @bs. This is part of
 * bdrv_drained_begin.
 */
void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                               bool ignore_bds_parents);

/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end:
 *
 * End a quiesced section of all users of @bs. This is part of
 * bdrv_drained_end.
 */
void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                             bool ignore_bds_parents);

/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and, if @recursive is true, its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including NBD server and device model. Note that
 * this doesn't block timers or coroutines from submitting more requests, which
 * means block_job_pause is still necessary.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);

/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 */
void bdrv_drained_end(BlockDriverState *bs);
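/*
 * Example (illustrative sketch): the usual pattern for temporarily getting
 * exclusive access to a node, e.g. before modifying the block graph around
 * it.  Every bdrv_drained_begin() must be paired with a bdrv_drained_end().
 *
 *     bdrv_drained_begin(bs);
 *
 *     // no new external requests reach 'bs' here, and previously
 *     // in-flight requests have completed
 *
 *     bdrv_drained_end(bs);
 */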
663 */ 664 void bdrv_subtree_drained_end(BlockDriverState *bs); 665 666 void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child, 667 Error **errp); 668 void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp); 669 670 bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, 671 uint32_t granularity, Error **errp); 672 /** 673 * 674 * bdrv_register_buf/bdrv_unregister_buf: 675 * 676 * Register/unregister a buffer for I/O. For example, VFIO drivers are 677 * interested to know the memory areas that would later be used for I/O, so 678 * that they can prepare IOMMU mapping etc., to get better performance. 679 */ 680 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size); 681 void bdrv_unregister_buf(BlockDriverState *bs, void *host); 682 683 /** 684 * 685 * bdrv_co_copy_range: 686 * 687 * Do offloaded copy between two children. If the operation is not implemented 688 * by the driver, or if the backend storage doesn't support it, a negative 689 * error code will be returned. 690 * 691 * Note: block layer doesn't emulate or fallback to a bounce buffer approach 692 * because usually the caller shouldn't attempt offloaded copy any more (e.g. 693 * calling copy_file_range(2)) after the first error, thus it should fall back 694 * to a read+write path in the caller level. 695 * 696 * @src: Source child to copy data from 697 * @src_offset: offset in @src image to read data 698 * @dst: Destination child to copy data to 699 * @dst_offset: offset in @dst image to write data 700 * @bytes: number of bytes to copy 701 * @flags: request flags. Supported flags: 702 * BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero 703 * write on @dst as if bdrv_co_pwrite_zeroes is 704 * called. Used to simplify caller code, or 705 * during BlockDriver.bdrv_co_copy_range_from() 706 * recursion. 707 * BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping 708 * requests currently in flight. 709 * 710 * Returns: 0 if succeeded; negative error code if failed. 711 **/ 712 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset, 713 BdrvChild *dst, uint64_t dst_offset, 714 uint64_t bytes, BdrvRequestFlags read_flags, 715 BdrvRequestFlags write_flags); 716 #endif 717