/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_IO_H
#define BLOCK_IO_H

#include "block-common.h"

/*
 * I/O API functions. These functions are thread-safe, and therefore
 * can run in any thread as long as that thread has called
 * aio_context_acquire()/aio_context_release().
 *
 * These functions may only call functions from the I/O and Common
 * categories, but can be invoked by GS, "I/O or GS" and I/O APIs.
 *
 * All functions in this category must use the macro
 * IO_CODE();
 * to catch accidental calls from the wrong API category.
 */
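
/*
 * Illustrative sketch (not part of the original header): an I/O-category
 * helper is expected to begin with IO_CODE(). The function name below is
 * hypothetical, used only for this example.
 *
 *     static int64_t my_get_length(BlockDriverState *bs)
 *     {
 *         IO_CODE();
 *         return bdrv_getlength(bs);
 *     }
 */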

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
                int64_t bytes);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t bytes);
/*
 * Efficiently zero a region of the disk image.  Note that this is a regular
 * I/O request like read or write and should have a reasonable size.  This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags);
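
/*
 * Illustrative sketch (assumption, not from the original header): since
 * bdrv_co_pwrite_zeroes() should not cover the whole image in one request,
 * a caller would typically loop over bounded chunks. The chunk size and
 * helper name below are made up for this example.
 *
 *     static int coroutine_fn my_zero_image(BdrvChild *child)
 *     {
 *         const int64_t chunk = 1 * MiB;
 *         int64_t len = bdrv_getlength(child->bs);
 *         int64_t offset;
 *
 *         if (len < 0) {
 *             return len;
 *         }
 *         for (offset = 0; offset < len; offset += chunk) {
 *             int ret = bdrv_co_pwrite_zeroes(child, offset,
 *                                             MIN(chunk, len - offset), 0);
 *             if (ret < 0) {
 *                 return ret;
 *             }
 *         }
 *         return 0;
 *     }
 */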

int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
void coroutine_fn bdrv_co_delete_file_noerr(BlockDriverState *bs);


/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Ensure contents are flushed to disk.  */
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);

int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            bool include_base, int64_t offset, int64_t bytes,
                            int64_t *pnum);
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes);
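
/*
 * Illustrative sketch (assumption, not from the original header): callers
 * typically walk an image with bdrv_block_status() in a loop, advancing by
 * *pnum after each call; BDRV_BLOCK_ZERO is one of the status flags from
 * the common block status reporting. The helper name is made up.
 *
 *     static int64_t my_count_zero_bytes(BlockDriverState *bs, int64_t len)
 *     {
 *         int64_t offset = 0, zeroes = 0, pnum;
 *
 *         while (offset < len) {
 *             int ret = bdrv_block_status(bs, offset, len - offset, &pnum,
 *                                         NULL, NULL);
 *             if (ret < 0) {
 *                 return ret;
 *             }
 *             if (ret & BDRV_BLOCK_ZERO) {
 *                 zeroes += pnum;
 *             }
 *             offset += pnum;
 *         }
 *         return zeroes;
 *     }
 */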

int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
                              Error **errp);
bool bdrv_is_read_only(BlockDriverState *bs);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);

bool bdrv_supports_compressed_writes(BlockDriverState *bs);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
                                          Error **errp);
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

/*
 * Returns the alignment in bytes that is needed so that no bounce buffer
 * is required anywhere in the stack
 */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns the optimal alignment in bytes for a bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
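
/*
 * Illustrative sketch (assumption, not from the original header): the
 * alignment helpers above are typically combined when deciding whether a
 * vectored request needs a bounce buffer. qemu_blockalign() allocates at
 * bdrv_opt_mem_align(bs); such buffers are freed with qemu_vfree().
 *
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         void *bounce = qemu_blockalign(bs, qiov->size);
 *         ... copy through the bounce buffer instead ...
 *         qemu_vfree(bounce);
 *     }
 */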

void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)
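
/*
 * Illustrative sketch (assumption, not from the original header): drivers
 * fire blkdebug events around their I/O so the blkdebug filter can inject
 * errors or suspend requests at that point, e.g. with the BLKDBG_READ_AIO
 * value of BlkdebugEvent:
 *
 *     BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
 *     ret = bdrv_pread(bs->file, offset, buf, bytes);
 */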

/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);

/**
 * Move the current coroutine to the AioContext of @bs and return the old
 * AioContext of the coroutine. Increase bs->in_flight so that draining @bs
 * will wait for the operation, which proceeds until the corresponding
 * bdrv_co_leave().
 *
 * Consequently, you can't call drain inside a bdrv_co_enter()/bdrv_co_leave()
 * section, as this would deadlock.
 */
AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs);

/**
 * Ends a section started by bdrv_co_enter(). Moves the current coroutine back
 * to @old_ctx and decreases bs->in_flight again.
 */
void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx);
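
/*
 * Illustrative sketch (not part of the original header): the two functions
 * above bracket work that must run in the AioContext of @bs.
 *
 *     AioContext *old_ctx = bdrv_co_enter(bs);
 *     ... work in bs's AioContext; no draining in here ...
 *     bdrv_co_leave(bs, old_ctx);
 */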

/**
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);

AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
                                     uint32_t granularity, Error **errp);

/**
 * bdrv_co_copy_range:
 *
 * Do an offloaded copy between two children. If the operation is not
 * implemented by the driver, or if the backend storage doesn't support it, a
 * negative error code will be returned.
 *
 * Note: the block layer doesn't emulate this or fall back to a bounce-buffer
 * approach, because after the first error the caller usually shouldn't attempt
 * offloaded copy again (e.g. by calling copy_file_range(2)); instead, it
 * should fall back to a read+write path at the caller level.
 *
 * @src: Source child to copy data from
 * @src_offset: offset in @src image to read data
 * @dst: Destination child to copy data to
 * @dst_offset: offset in @dst image to write data
 * @bytes: number of bytes to copy
 * @read_flags, @write_flags: request flags. Supported flags:
 *         BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do a
 *                               zero write on @dst as if bdrv_co_pwrite_zeroes
 *                               were called. Used to simplify caller code, or
 *                               during BlockDriver.bdrv_co_copy_range_from()
 *                               recursion.
 *         BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
 *                                   requests currently in flight.
 *
 * Returns: 0 on success; a negative error code on failure.
 */
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags);
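
/*
 * Illustrative sketch (assumption, not from the original header): per the
 * note above, once offloaded copy fails the caller falls back to an
 * explicit read+write; buffer management is elided here.
 *
 *     ret = bdrv_co_copy_range(src, src_off, dst, dst_off, bytes, 0, 0);
 *     if (ret < 0) {
 *         ret = bdrv_pread(src, src_off, buf, bytes);
 *         if (ret >= 0) {
 *             ret = bdrv_pwrite(dst, dst_off, buf, bytes);
 *         }
 *         ... and stop attempting offloaded copy for later chunks ...
 *     }
 */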

/**
 * bdrv_drained_end_no_poll:
 *
 * Same as bdrv_drained_end(), but do not poll for the subgraph to
 * actually become unquiesced.  Therefore, no graph changes will occur
 * with this function.
 *
 * *drained_end_counter is incremented for every background operation
 * that is scheduled, and will be decremented for every operation once
 * it settles.  The caller must poll until it reaches 0.  The counter
 * should be accessed using atomic operations only.
 */
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);
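
/*
 * Illustrative sketch (assumption, not from the original header): the
 * caller-side polling described above can be written with BDRV_POLL_WHILE
 * (defined below) and an atomic read of the counter.
 *
 *     int drained_end_counter = 0;
 *     bdrv_drained_end_no_poll(bs, &drained_end_counter);
 *     BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
 */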


/*
 * "I/O or GS" API functions. These functions can run without
 * the BQL, but only in one specific iothread/main loop.
 *
 * More specifically, these functions use BDRV_POLL_WHILE(bs), which
 * requires the caller either to be in the main thread and hold
 * the AioContext lock of the BlockDriverState (bs), or to be directly in the
 * home thread that runs the bs AioContext. Calling them from
 * another thread in another AioContext would cause deadlocks.
 *
 * Therefore, these functions are not proper I/O, because they
 * can't run in *any* iothread, but only in a specific one.
 *
 * These functions can call any function from the I/O, Common and
 * "I/O or GS" categories, but must be invoked only by other "I/O or GS"
 * and GS APIs.
 *
 * All functions in this category must use the macro
 * IO_OR_GS_CODE();
 * to catch accidental calls from the wrong API category.
 */

#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    IO_OR_GS_CODE();                                       \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                   cond); })
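
/*
 * Illustrative sketch (assumption, not from the original header): a common
 * pattern is to spawn a coroutine and poll until it reports completion
 * through a stack variable; MyCoData and my_co_entry are hypothetical.
 *
 *     MyCoData data = { .done = false };
 *     Coroutine *co = qemu_coroutine_create(my_co_entry, &data);
 *     bdrv_coroutine_enter(bs, co);
 *     BDRV_POLL_WHILE(bs, !data.done);
 */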

void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);

int generated_co_wrapper
bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
              PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);

int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res,
                                    BdrvCheckMode fix);

/* Invalidate any cached metadata used by image formats */
int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs,
                                               Error **errp);
int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset,
                                       int64_t bytes);
int generated_co_wrapper
bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int generated_co_wrapper
bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);

/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end_single:
 *
 * End a quiesced section for the parent of @c.
 *
 * This polls the AioContext of @c->bs until all scheduled sub-drained_ends
 * have settled, which may result in graph changes.
 */
void bdrv_parent_drained_end_single(BdrvChild *c);

/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and, if @recursive is true, its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources, including the NBD server, block jobs, and the
 * device model.
 *
 * This function can be called recursively.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);

/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled.  On one hand, that may result in graph changes.  On
 * the other, this requires that the caller either run in the main
 * loop, or that all involved nodes (@bs and all of its parents) be
 * in the caller's AioContext.
 */
void bdrv_drained_end(BlockDriverState *bs);
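
/*
 * Illustrative sketch (not part of the original header): drained sections
 * bracket operations that must not race with new I/O.
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the graph or bs's state with no requests in flight ...
 *     bdrv_drained_end(bs);
 */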

/**
 * End a quiescent section started by bdrv_subtree_drained_begin().
 */
void bdrv_subtree_drained_end(BlockDriverState *bs);

#endif /* BLOCK_IO_H */