/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H

#include "block/accounting.h"
#include "block/block.h"
#include "block/throttle-groups.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"

#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_COMPAT6          4
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512

enum BdrvTrackedRequestType {
    BDRV_TRACKED_READ,
    BDRV_TRACKED_WRITE,
    BDRV_TRACKED_FLUSH,
    BDRV_TRACKED_IOCTL,
    BDRV_TRACKED_DISCARD,
};

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    enum BdrvTrackedRequestType type;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* For snapshots, block filters like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, look at the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers that implement neither bdrv_parse_filename nor bdrv_open should
     * have this field set to true, except those that are defined only by
     * their child's bs.
     * An example of the latter type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors);
    int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors);
    void (*bdrv_close)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);

    /* aio */
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    /*
     * Efficiently zero a region of the disk image.  Typically an image format
     * would use a compact metadata representation to implement this.  This
     * function pointer may be NULL and .bdrv_co_writev() will be called
     * instead.
     */
    int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum);

    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);
    int (*bdrv_inactivate)(BlockDriverState *bs);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov,
                             int64_t pos);
    int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;

    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState *bs, BdrvCheckResult *result,
        BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better to pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
        const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);

    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use.  Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again.  Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);
    void (*bdrv_flush_io_queue)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors)
     * On success, store them in @geo and return 0.
     * On failure return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    /**
     * Drain and stop any internal sources of requests in the driver, and
     * remain so until the next I/O callback (e.g. bdrv_co_writev) is called.
     */
    void (*bdrv_drain)(BlockDriverState *bs);

    QLIST_ENTRY(BlockDriver) list;
};
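
/*
 * Example (illustrative sketch, not a complete driver): a minimal,
 * hypothetical format driver might fill in only a handful of the callbacks
 * above and register itself at startup.  The names "mydrv" and
 * BDRVMyDrvState are made up for illustration, and the sketch assumes the
 * usual bdrv_register()/block_init() helpers from block.h and qemu/module.h;
 * real drivers (see block/qcow2.c) implement many more callbacks.
 *
 *     typedef struct BDRVMyDrvState {
 *         int64_t length;                      // virtual disk size in bytes
 *     } BDRVMyDrvState;
 *
 *     static int mydrv_open(BlockDriverState *bs, QDict *options,
 *                           int flags, Error **errp)
 *     {
 *         BDRVMyDrvState *s = bs->opaque;      // allocated per .instance_size
 *         s->length = 0;
 *         return 0;
 *     }
 *
 *     static int64_t mydrv_getlength(BlockDriverState *bs)
 *     {
 *         BDRVMyDrvState *s = bs->opaque;
 *         return s->length;
 *     }
 *
 *     static BlockDriver bdrv_mydrv = {
 *         .format_name    = "mydrv",
 *         .instance_size  = sizeof(BDRVMyDrvState),
 *         .bdrv_open      = mydrv_open,
 *         .bdrv_getlength = mydrv_getlength,
 *     };
 *
 *     static void bdrv_mydrv_init(void)
 *     {
 *         bdrv_register(&bdrv_mydrv);
 *     }
 *     block_init(bdrv_mydrv_init);
 */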

typedef struct BlockLimits {
    /* maximum number of sectors that can be discarded at once */
    int max_discard;

    /* optimal alignment for discard requests in sectors */
    int64_t discard_alignment;

    /* maximum number of sectors that can be zeroed at once */
    int max_write_zeroes;

    /* optimal alignment for write zeroes requests in sectors */
    int64_t write_zeroes_alignment;

    /* optimal transfer length in sectors */
    int opt_transfer_length;

    /* maximal transfer length in sectors */
    int max_transfer_length;

    /* memory alignment so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment for bounce buffer */
    size_t opt_mem_alignment;

    /* maximum number of iovec elements */
    int max_iov;
} BlockLimits;
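
/*
 * Example (illustrative sketch): a driver's .bdrv_refresh_limits() callback
 * typically fills in the fields above on bs->bl.  The driver name and the
 * particular values below are made up; sector-based fields are in sectors as
 * noted above, memory alignments are in bytes.
 *
 *     static void mydrv_refresh_limits(BlockDriverState *bs, Error **errp)
 *     {
 *         bs->bl.max_discard            = 65536;
 *         bs->bl.write_zeroes_alignment = 128;
 *         bs->bl.opt_transfer_length    = 256;
 *         bs->bl.opt_mem_alignment      = 4096;
 *         bs->bl.max_iov                = 1024;
 *     }
 */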

typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

struct BdrvChildRole {
    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;

struct BdrvChild {
    BlockDriverState *bs;
    char *name;
    const BdrvChildRole *role;
    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};

/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int read_only; /* if true, the media is read only */
    int open_flags; /* flags used to open the file, re-used for re-open */
    int encrypted; /* if true, the media is encrypted */
    int valid_key; /* if true, a valid encryption key has been set */
    int sg;        /* if true, the device is a /dev/sg* */
    int copy_on_read; /* if true, copy read backing sectors into image;
                         note this is a reference count */
    bool probed;

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    BlockBackend *blk;          /* owning backend, if any */

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non-empty, the image is a diff
                                    against this file */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;
    BdrvChild *file;

    NotifierList close_notifiers;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* I/O throttling.
     * throttle_state tells us if this BDS has I/O limits configured.
     * io_limits_enabled tells us if they are currently being
     * enforced, but it can be temporarily set to false */
    CoQueue      throttled_reqs[2];
    bool         io_limits_enabled;
    /* The following fields are protected by the ThrottleGroup lock.
     * See the ThrottleGroup documentation for details. */
    ThrottleState *throttle_state;
    ThrottleTimers throttle_timers;
    unsigned       pending_reqs[2];
    QLIST_ENTRY(BlockDriverState) round_robin;

    /* Offset after the highest byte written to */
    uint64_t wr_highest_offset;

    /* I/O Limits */
    BlockLimits bl;

    /* Whether the device produces zeros when reading beyond EOF */
    bool zero_beyond_eof;

    /* Alignment requirement for offset/length of I/O requests */
    unsigned int request_alignment;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of "drives" the guest sees */
    QTAILQ_ENTRY(BlockDriverState) device_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
    int refcnt;

    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always a
     * parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* threshold limit for writes, in bytes. "High water mark". */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    int quiesce_counter;
};

struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;

    char *throttle_group;
    ThrottleState *throttle_state;
};

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}


/* Essential block drivers which must always be statically linked into qemu, and
 * which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

extern QTAILQ_HEAD(BdrvStates, BlockDriverState) bdrv_states;

/**
 * bdrv_setup_io_funcs:
 *
 * Prepare a #BlockDriver for I/O request processing by populating
 * unimplemented coroutine and AIO interfaces with generic wrapper functions
 * that fall back to implemented interfaces.
 */
void bdrv_setup_io_funcs(BlockDriver *bdrv);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);

void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg);


/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);
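
/*
 * Example (illustrative sketch): in this tree the data pointer passed to the
 * notifier's ->notify callback is the BdrvTrackedRequest describing the write
 * about to be processed (this is how the backup job intercepts guest writes).
 * The callback and variable names below are made up for illustration.
 *
 *     static int before_write_cb(NotifierWithReturn *notifier, void *opaque)
 *     {
 *         BdrvTrackedRequest *req = opaque;
 *         // inspect req->offset and req->bytes; return 0 to let the write
 *         // proceed (a negative value is treated as an error)
 *         return 0;
 *     }
 *
 *     static NotifierWithReturn before_write = {
 *         .notify = before_write_cb,
 *     };
 *
 *     bdrv_add_before_write_notifier(bs, &before_write);
 */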

/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext.  This is only needed by block drivers that manage their
 * own children.  Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext.  This is only needed by block drivers that manage their own
 * children.  Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding
 * the association of the BDS with an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to
 * a new AioContext; detach_aio_context() is called before the target BDS is
 * detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);
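
/*
 * Example (illustrative sketch): a long-running job can register callbacks
 * like these so its timers and fd handlers follow the BDS when it moves to a
 * different AioContext.  The job type and function names are made up.
 *
 *     static void my_job_attached_aio_context(AioContext *new_context,
 *                                             void *opaque)
 *     {
 *         MyJob *job = opaque;         // hypothetical per-job state
 *         // re-create timers and fd handlers in new_context
 *     }
 *
 *     static void my_job_detach_aio_context(void *opaque)
 *     {
 *         // quiesce and tear down anything tied to the old context
 *     }
 *
 *     bdrv_add_aio_context_notifier(bs, my_job_attached_aio_context,
 *                                   my_job_detach_aio_context, job);
 */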

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext. The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);

#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @base_id: The file name that will be written to @bs as the new
 * backing file if the job completes.  Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs.  Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs.  At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @base_id in the written image and to @base in the live BlockDriverState.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);
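
/*
 * Example (illustrative sketch): a hypothetical caller that flattens the
 * whole backing chain of @bs with no speed limit could start the job as
 * follows; stream_done_cb, its opaque value and local_err are made up.
 *
 *     Error *local_err = NULL;
 *     stream_start(bs, NULL, NULL, 0, BLOCKDEV_ON_ERROR_REPORT,
 *                  stream_done_cb, opaque, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */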

/**
 * commit_start:
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay
 * @errp: Error object.
 *
 */
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                 BlockDriverState *top, int64_t speed,
                 BlockdevOnError on_error, BlockCompletionFunc *cb,
                 void *opaque, const char *backing_file_str, Error **errp);
/**
 * commit_active_start:
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 */
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp);
/*
 * mirror_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs.  Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed.  At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);

/*
 * backup_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 *
 * Start a backup operation on @bs.  Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp);

void blk_set_bs(BlockBackend *blk, BlockDriverState *bs);

void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);
void blk_dev_resize_cb(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
bool bdrv_requests_pending(BlockDriverState *bs);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);

#endif /* BLOCK_INT_H */