#ifndef BLOCK_H
#define BLOCK_H

#include "block/aio.h"
#include "qapi/qapi-types-block-core.h"
#include "block/aio-wait.h"
#include "qemu/iov.h"
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
#include "block/blockjob.h"
#include "qemu/hbitmap.h"

/* block.c */
typedef struct BlockDriver BlockDriver;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildRole BdrvChildRole;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if unallocated blocks read back as zeroes. This is equivalent
     * to the LBPRZ flag in the SCSI logical block provisioning page.
     */
    bool unallocated_blocks_are_zero;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

typedef enum {
    BDRV_REQ_COPY_ON_READ       = 0x1,
    BDRV_REQ_ZERO_WRITE         = 0x2,

    /*
     * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
     * that the block driver should unmap (discard) blocks if it is guaranteed
     * that the result will read back as zeroes. The flag is only passed to the
     * driver if the block device is opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP          = 0x4,

    /*
     * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means that
     * we don't want wait_serialising_requests() during the read operation.
     *
     * This flag is used for backup copy-on-write operations, when we need to
     * read old data before the write (triggered by a write notifier). It is
     * okay because we already waited for other serializing requests in the
     * initiating write (see bdrv_aligned_pwritev), and it is necessary if the
     * initiating write is already serializing (without the flag, the read
     * would deadlock waiting for the serialising write to complete).
     */
    BDRV_REQ_NO_SERIALISING     = 0x8,
    BDRV_REQ_FUA                = 0x10,
    BDRV_REQ_WRITE_COMPRESSED   = 0x20,

    /* Signifies that this write request will not change the visible disk
     * content. */
    BDRV_REQ_WRITE_UNCHANGED    = 0x40,

    /*
     * BDRV_REQ_SERIALISING forces request serialisation for writes.
     * It is used to ensure that writes to the backing file of a backup process
     * target cannot race with a read of the backup target that defers to the
     * backing file.
     *
     * Note that BDRV_REQ_SERIALISING is _not_ the opposite in meaning of
     * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
     * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
     */
    BDRV_REQ_SERIALISING        = 0x80,

    /* Mask of valid flags */
    BDRV_REQ_MASK               = 0xff,
} BdrvRequestFlags;

typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;

#define BDRV_O_RDWR        0x0002
#define BDRV_O_RESIZE      0x0004 /* request permission for resizing the node */
#define BDRV_O_SNAPSHOT    0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY   0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE     0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO  0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING  0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH    0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE    0x0800 /* consistency hint for migration handoff */
#define BDRV_O_CHECK       0x1000 /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR  0x2000 /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP       0x4000 /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL    0x8000 /* if no block driver is explicitly given:
                                     select an appropriate protocol driver,
                                     ignoring the format layer */
#define BDRV_O_NO_IO       0x10000 /* don't initialize for I/O */

#define BDRV_O_CACHE_MASK  (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)


/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY      "read-only"
#define BDRV_OPT_DISCARD        "discard"
#define BDRV_OPT_FORCE_SHARE    "force-share"


#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)

#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                     INT_MAX >> BDRV_SECTOR_BITS)
#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)

/*
 * Allocation status flags for bdrv_block_status() and friends.
 *
 * Public flags:
 * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
 * BDRV_BLOCK_ZERO: offset reads as zero
 * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer rather than any backing, set by block layer
 * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
 *                 layer, set by block layer
 *
 * Internal flag:
 * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
 *                 that the block layer recompute the answer from the returned
 *                 BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
 * host offset within the returned BDS that is allocated for the
 * corresponding raw guest data.
 * However, whether that offset actually contains data also depends on
 * BDRV_BLOCK_DATA, as follows:
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, returned file is zero at offset
 *  t    f        t       sectors read as valid from file at offset
 *  f    t        t       sectors preallocated, read as zero, returned file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        returned file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_EOF          0x20
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK

typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;
    BlockdevDetectZeroesOptions detect_zeroes;
    uint64_t perm, shared_perm;
    QDict *options;
    QDict *explicit_options;
    void *opaque;
} BDRVReopenState;

/*
 * Block operation types
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EJECT,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
    BLOCK_OP_TYPE_MAX,
} BlockOpType;

/* Block node permission constants */
enum {
    /**
     * A user that has the "permission" of consistent reads is guaranteed that
     * their view of the contents of the block device is complete and
     * self-consistent, representing the contents of a disk at a specific
     * point in time.
     *
     * For most block devices (including their backing files) this is true, but
     * the property cannot be maintained in a few situations like for
     * intermediate nodes of a commit block job.
     */
    BLK_PERM_CONSISTENT_READ    = 0x01,

    /** This permission is required to change the visible disk contents. */
    BLK_PERM_WRITE              = 0x02,

    /**
     * This permission (which is weaker than BLK_PERM_WRITE) is both sufficient
     * and required for writes to the block node when the caller promises that
     * the visible disk content doesn't change.
     *
     * As the BLK_PERM_WRITE permission is strictly stronger, either is
     * sufficient to perform an unchanging write.
     */
    BLK_PERM_WRITE_UNCHANGED    = 0x04,

    /** This permission is required to change the size of a block node. */
    BLK_PERM_RESIZE             = 0x08,

    /**
     * This permission is required to change the node that this BdrvChild
     * points to.
     */
    BLK_PERM_GRAPH_MOD          = 0x10,

    BLK_PERM_ALL                = 0x1f,

    DEFAULT_PERM_PASSTHROUGH    = BLK_PERM_CONSISTENT_READ
                                 | BLK_PERM_WRITE
                                 | BLK_PERM_WRITE_UNCHANGED
                                 | BLK_PERM_RESIZE,

    DEFAULT_PERM_UNCHANGED      = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
};
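
/*
 * Illustrative sketch of how the permission bits above compose (hypothetical
 * values, not taken from a real driver): a parent that writes to and resizes
 * a child, while tolerating any concurrent user of that child, might use
 *
 *     uint64_t perm        = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE
 *                            | BLK_PERM_RESIZE;
 *     uint64_t shared_perm = BLK_PERM_ALL;
 *
 * The helpers that actually apply such perm/shared_perm pairs to a BdrvChild
 * are declared elsewhere, not in this header.
 */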

char *bdrv_perm_names(uint64_t perm);

/* disk I/O throttling */
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
bool bdrv_uses_whitelist(void);
int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp);
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
BlockDriverState *bdrv_new(void);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
                 Error **errp);
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
                       Error **errp);

int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
int bdrv_parse_discard_flags(const char *mode, int *flags);
BdrvChild *bdrv_open_child(const char *filename,
                           QDict *options, const char *bdref_key,
                           BlockDriverState *parent,
                           const BdrvChildRole *child_role,
                           bool allow_none, Error **errp);
BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
                         Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
                           const char *bdref_key, Error **errp);
BlockDriverState *bdrv_open(const char *filename, const char *reference,
                            QDict *options, int flags, Error **errp);
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
                                       int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs,
                                    QDict *options, int flags);
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue,
                         Error **errp);
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
                        BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors);
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors);
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count);
/*
 * Efficiently zero a region of the disk image.  Note that this is a regular
 * I/O request like read or write and should have a reasonable size.  This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
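 *
 * A minimal illustrative sketch (hypothetical caller running in coroutine
 * context with a valid BdrvChild *child; not taken from real code):
 *
 *     ret = bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 * Per the flag documentation above, BDRV_REQ_MAY_UNMAP is only passed to the
 * driver if the node was opened with BDRV_O_UNMAP.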
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file);
void bdrv_refresh_filename(BlockDriverState *bs);

int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
                                  PreallocMode prealloc, Error **errp);
int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
                  Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_change_backing_file(BlockDriverState *bs,
                             const char *backing_file, const char *backing_fmt);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
                           const char *backing_file_str);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);


typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);

/* The units of offset and total_work_size may be chosen arbitrarily by the
 * block driver; total_work_size may change during the course of the amendment
 * operation */
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
                                      int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                       Error **errp);

/* external snapshots */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate);
bool bdrv_is_first_non_filter(BlockDriverState *candidate);

/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
                                        const char *node_name, Error **errp);

/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Invalidate any cached metadata used by image formats */
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
void bdrv_invalidate_cache_all(Error **errp);
int bdrv_inactivate_all(void);

/* Ensure contents are flushed to disk.
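 *
 * Illustrative (hypothetical) pattern, not a required idiom: flush after a
 * write whose durability matters, assuming a valid BdrvChild *child:
 *
 *     if (bdrv_pwrite(child, offset, buf, bytes) >= 0) {
 *         ret = bdrv_flush(child->bs);
 *     }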
 */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);

#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                   cond); })

int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes);
int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum);

bool bdrv_is_read_only(BlockDriverState *bs);
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_set_read_only(BlockDriverState *bs, bool read_only, Error **errp);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp);
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
BlockDriverState *bdrv_next_node(BlockDriverState *bs);
BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);

typedef struct BdrvNextIterator {
    enum {
        BDRV_NEXT_BACKEND_ROOTS,
        BDRV_NEXT_MONITOR_OWNED,
    } phase;
    BlockBackend *blk;
    BlockDriverState *bs;
} BdrvNextIterator;

BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);
void bdrv_next_cleanup(BdrvNextIterator *it);

BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
bool bdrv_is_encrypted(BlockDriverState *bs);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);
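
/*
 * Illustrative sketch (hypothetical caller, not from real code) of querying
 * allocation status with bdrv_block_status() and interpreting the
 * BDRV_BLOCK_* flags documented earlier in this header:
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
 *     if (ret >= 0 && (ret & BDRV_BLOCK_ZERO)) {
 *         ... the first pnum bytes starting at offset read back as zeroes ...
 *     }
 */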
const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
void bdrv_get_full_backing_filename(BlockDriverState *bs,
                                    char *dest, size_t sz, Error **errp);
void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp);

int path_has_protocol(const char *path);
int path_is_absolute(const char *path);
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename);

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     bool quiet, Error **errp);

/* Returns the alignment in bytes that is required so that no bounce buffer
 * is needed throughout the stack */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);

void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
                             BlockDriverState *child_bs,
                             const char *child_name,
                             const BdrvChildRole *child_role,
                             Error **errp);

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
bool bdrv_op_blocker_is_empty(BlockDriverState *bs);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag);
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);

/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);

/**
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);
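
/*
 * Illustrative sketch (hypothetical, not from real callers) of allocating a
 * bounce buffer that satisfies the alignment requirements of @bs and then
 * releasing it; qemu_vfree() is the matching free function:
 *
 *     void *buf = qemu_blockalign(bs, len);
 *     ... fill or consume buf ...
 *     qemu_vfree(buf);
 */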

/**
 * bdrv_set_aio_context:
 *
 * Changes the #AioContext used for fd handlers, timers, and BHs by this
 * BlockDriverState and all its children.
 *
 * This function must be called with the iothread lock held.
 */
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context);
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

/**
 * bdrv_parent_drained_begin:
 *
 * Begin a quiesced section of all users of @bs. This is part of
 * bdrv_drained_begin.
 */
void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                               bool ignore_bds_parents);

/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end:
 *
 * End a quiesced section of all users of @bs. This is part of
 * bdrv_drained_end.
 */
void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                             bool ignore_bds_parents);

/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and, if @recursive is true, its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including the NBD server and device model. Note
 * that this doesn't block timers or coroutines from submitting more requests,
 * which means block_job_pause is still necessary.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);

/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 */
void bdrv_drained_end(BlockDriverState *bs);

/**
 * End a quiescent section started by bdrv_subtree_drained_begin().
 */
void bdrv_subtree_drained_end(BlockDriverState *bs);

void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
                    Error **errp);
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);

bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
                                     uint32_t granularity, Error **errp);
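
/*
 * Illustrative sketch (hypothetical, not from real callers) of a drained
 * section protecting work that must not race with new I/O requests:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the graph or otherwise rely on exclusive access ...
 *     bdrv_drained_end(bs);
 */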
/**
 *
 * bdrv_register_buf/bdrv_unregister_buf:
 *
 * Register/unregister a buffer for I/O. For example, VFIO drivers are
 * interested in knowing the memory areas that will later be used for I/O, so
 * that they can prepare IOMMU mappings and the like ahead of time for better
 * performance.
 */
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size);
void bdrv_unregister_buf(BlockDriverState *bs, void *host);

/**
 *
 * bdrv_co_copy_range:
 *
 * Do an offloaded copy between two children. If the operation is not
 * implemented by the driver, or if the backend storage doesn't support it, a
 * negative error code is returned.
 *
 * Note: the block layer doesn't emulate this or fall back to a bounce buffer
 * approach, because usually the caller shouldn't attempt an offloaded copy
 * again (e.g. by calling copy_file_range(2)) after the first error; it should
 * instead fall back to a read+write path at the caller level.
 *
 * @src: Source child to copy data from
 * @src_offset: offset in @src image to read data
 * @dst: Destination child to copy data to
 * @dst_offset: offset in @dst image to write data
 * @bytes: number of bytes to copy
 * @flags: request flags. Supported flags:
 *         BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
 *                               write on @dst as if bdrv_co_pwrite_zeroes is
 *                               called. Used to simplify caller code, or
 *                               during BlockDriver.bdrv_co_copy_range_from()
 *                               recursion.
 *         BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
 *                                   requests currently in flight.
 *
 * Returns: 0 if succeeded; negative error code if failed.
 **/
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags);
#endif