xref: /openbmc/qemu/block/block-backend.c (revision 63785678)
1 /*
2  * QEMU Block backends
3  *
4  * Copyright (C) 2014 Red Hat, Inc.
5  *
6  * Authors:
7  *  Markus Armbruster <armbru@redhat.com>,
8  *
9  * This work is licensed under the terms of the GNU LGPL, version 2.1
10  * or later.  See the COPYING.LIB file in the top-level directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "sysemu/block-backend.h"
15 #include "block/block_int.h"
16 #include "block/blockjob.h"
17 #include "block/throttle-groups.h"
18 #include "sysemu/blockdev.h"
19 #include "sysemu/sysemu.h"
20 #include "qapi-event.h"
21 #include "qemu/id.h"
22 
23 /* Number of coroutines to reserve per attached device model */
24 #define COROUTINE_POOL_RESERVATION 64
25 
26 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
27 
28 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
29 
30 struct BlockBackend {
31     char *name;
32     int refcnt;
33     BdrvChild *root;
34     DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
35     QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
36     QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
37 
38     void *dev;                  /* attached device model, if any */
39     /* TODO change to DeviceState when all users are qdevified */
40     const BlockDevOps *dev_ops;
41     void *dev_opaque;
42 
43     /* the block size for which the guest device expects atomicity */
44     int guest_block_size;
45 
46     /* If the BDS tree is removed, some of its options are stored here so
47      * that they can be restored in the new BDS on insert */
48     BlockBackendRootState root_state;
49 
50     /* I/O stats (display with "info blockstats"). */
51     BlockAcctStats stats;
52 
53     BlockdevOnError on_read_error, on_write_error;
54     bool iostatus_enabled;
55     BlockDeviceIoStatus iostatus;
56 
57     bool allow_write_beyond_eof;
58 
59     NotifierList remove_bs_notifiers, insert_bs_notifiers;
60 };
61 
62 typedef struct BlockBackendAIOCB {
63     BlockAIOCB common;
64     QEMUBH *bh;
65     BlockBackend *blk;
66     int ret;
67 } BlockBackendAIOCB;
68 
69 static const AIOCBInfo block_backend_aiocb_info = {
70     .get_aio_context = blk_aiocb_get_aio_context,
71     .aiocb_size = sizeof(BlockBackendAIOCB),
72 };
73 
74 static void drive_info_del(DriveInfo *dinfo);
75 
76 /* All BlockBackends */
77 static QTAILQ_HEAD(, BlockBackend) block_backends =
78     QTAILQ_HEAD_INITIALIZER(block_backends);
79 
80 /* All BlockBackends referenced by the monitor; these are the ones that
81  * blk_next() iterates over */
82 static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
83     QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
84 
85 static void blk_root_inherit_options(int *child_flags, QDict *child_options,
86                                      int parent_flags, QDict *parent_options)
87 {
88     /* We're not supposed to call this function for root nodes */
89     abort();
90 }
91 
92 static const BdrvChildRole child_root = {
93     .inherit_options = blk_root_inherit_options,
94 };
95 
96 /*
97  * Create a new BlockBackend with a reference count of one.
98  * Store an error through @errp on failure, unless it's null.
99  * Return the new BlockBackend on success, null on failure.
100  */
101 BlockBackend *blk_new(Error **errp)
102 {
103     BlockBackend *blk;
104 
105     blk = g_new0(BlockBackend, 1);
106     blk->refcnt = 1;
107     notifier_list_init(&blk->remove_bs_notifiers);
108     notifier_list_init(&blk->insert_bs_notifiers);
109     QTAILQ_INSERT_TAIL(&block_backends, blk, link);
110     return blk;
111 }
112 
113 /*
114  * Create a new BlockBackend with a new BlockDriverState attached.
115  * Otherwise just like blk_new(), which see.
116  */
117 BlockBackend *blk_new_with_bs(Error **errp)
118 {
119     BlockBackend *blk;
120     BlockDriverState *bs;
121 
122     blk = blk_new(errp);
123     if (!blk) {
124         return NULL;
125     }
126 
127     bs = bdrv_new_root();
128     blk->root = bdrv_root_attach_child(bs, "root", &child_root);
129     bs->blk = blk;
130     return blk;
131 }
132 
133 /*
134  * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
135  *
136  * Just as with bdrv_open(), after having called this function the reference to
137  * @options belongs to the block layer (even on failure).
138  *
139  * TODO: Remove @filename and @flags; it should be possible to specify a whole
140  * BDS tree just by specifying the @options QDict (or @reference,
141  * alternatively). At the time of adding this function, this is not possible,
142  * though, so callers of this function have to be able to specify @filename and
143  * @flags.
144  */
145 BlockBackend *blk_new_open(const char *filename, const char *reference,
146                            QDict *options, int flags, Error **errp)
147 {
148     BlockBackend *blk;
149     int ret;
150 
151     blk = blk_new_with_bs(errp);
152     if (!blk) {
153         QDECREF(options);
154         return NULL;
155     }
156 
157     ret = bdrv_open(&blk->root->bs, filename, reference, options, flags, errp);
158     if (ret < 0) {
159         blk_unref(blk);
160         return NULL;
161     }
162 
163     return blk;
164 }
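
/*
 * Illustrative usage sketch; the filename, driver choice and flags are
 * hypothetical.  As documented above, @options is consumed even on failure
 * and errors are reported through @errp:
 *
 *     QDict *options = qdict_new();
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     qdict_put(options, "driver", qstring_from_str("raw"));
 *     blk = blk_new_open("/tmp/test.img", NULL, options, BDRV_O_RDWR,
 *                        &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     } else {
 *         ...
 *         blk_unref(blk);
 *     }
 */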
165 
166 static void blk_delete(BlockBackend *blk)
167 {
168     assert(!blk->refcnt);
169     assert(!blk->name);
170     assert(!blk->dev);
171     if (blk->root) {
172         blk_remove_bs(blk);
173     }
174     assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
175     assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
176     if (blk->root_state.throttle_state) {
177         g_free(blk->root_state.throttle_group);
178         throttle_group_unref(blk->root_state.throttle_state);
179     }
180     QTAILQ_REMOVE(&block_backends, blk, link);
181     drive_info_del(blk->legacy_dinfo);
182     block_acct_cleanup(&blk->stats);
183     g_free(blk);
184 }
185 
186 static void drive_info_del(DriveInfo *dinfo)
187 {
188     if (!dinfo) {
189         return;
190     }
191     qemu_opts_del(dinfo->opts);
192     g_free(dinfo->serial);
193     g_free(dinfo);
194 }
195 
196 int blk_get_refcnt(BlockBackend *blk)
197 {
198     return blk ? blk->refcnt : 0;
199 }
200 
201 /*
202  * Increment @blk's reference count.
203  * @blk must not be null.
204  */
205 void blk_ref(BlockBackend *blk)
206 {
207     blk->refcnt++;
208 }
209 
210 /*
211  * Decrement @blk's reference count.
212  * If this drops it to zero, destroy @blk.
213  * For convenience, do nothing if @blk is null.
214  */
215 void blk_unref(BlockBackend *blk)
216 {
217     if (blk) {
218         assert(blk->refcnt > 0);
219         if (!--blk->refcnt) {
220             blk_delete(blk);
221         }
222     }
223 }
224 
225 /*
226  * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
227  * ones which are hidden (i.e. are not referenced by the monitor).
228  */
229 static BlockBackend *blk_all_next(BlockBackend *blk)
230 {
231     return blk ? QTAILQ_NEXT(blk, link)
232                : QTAILQ_FIRST(&block_backends);
233 }
234 
235 void blk_remove_all_bs(void)
236 {
237     BlockBackend *blk = NULL;
238 
239     while ((blk = blk_all_next(blk)) != NULL) {
240         AioContext *ctx = blk_get_aio_context(blk);
241 
242         aio_context_acquire(ctx);
243         if (blk->root) {
244             blk_remove_bs(blk);
245         }
246         aio_context_release(ctx);
247     }
248 }
249 
250 /*
251  * Return the monitor-owned BlockBackend after @blk.
252  * If @blk is null, return the first one.
253  * Else, return @blk's next sibling, which may be null.
254  *
255  * To iterate over all BlockBackends, do
256  * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
257  *     ...
258  * }
259  */
260 BlockBackend *blk_next(BlockBackend *blk)
261 {
262     return blk ? QTAILQ_NEXT(blk, monitor_link)
263                : QTAILQ_FIRST(&monitor_block_backends);
264 }
265 
266 /*
267  * Iterates over all BlockDriverStates which are attached to a BlockBackend.
268  * This function is for use by bdrv_next().
269  *
270  * @bs must be NULL or a BDS that is attached to a BB.
271  */
272 BlockDriverState *blk_next_root_bs(BlockDriverState *bs)
273 {
274     BlockBackend *blk;
275 
276     if (bs) {
277         assert(bs->blk);
278         blk = bs->blk;
279     } else {
280         blk = NULL;
281     }
282 
283     do {
284         blk = blk_all_next(blk);
285     } while (blk && !blk->root);
286 
287     return blk ? blk->root->bs : NULL;
288 }
289 
290 /*
291  * Add a BlockBackend into the list of backends referenced by the monitor, with
292  * the given @name acting as the handle for the monitor.
293  * Strictly for use by blockdev.c.
294  *
295  * @name must not be null or empty.
296  *
297  * Returns true on success and false on failure. In the latter case, an Error
298  * object is returned through @errp.
299  */
300 bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
301 {
302     assert(!blk->name);
303     assert(name && name[0]);
304 
305     if (!id_wellformed(name)) {
306         error_setg(errp, "Invalid device name");
307         return false;
308     }
309     if (blk_by_name(name)) {
310         error_setg(errp, "Device with id '%s' already exists", name);
311         return false;
312     }
313     if (bdrv_find_node(name)) {
314         error_setg(errp,
315                    "Device name '%s' conflicts with an existing node name",
316                    name);
317         return false;
318     }
319 
320     blk->name = g_strdup(name);
321     QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
322     return true;
323 }
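
/*
 * Illustrative sketch of how a caller such as blockdev.c might create a
 * monitor-owned backend (the name "drive0" is hypothetical):
 *
 *     BlockBackend *blk = blk_new(errp);
 *     if (!blk) {
 *         return;
 *     }
 *     if (!monitor_add_blk(blk, "drive0", errp)) {
 *         blk_unref(blk);
 *         return;
 *     }
 */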
324 
325 /*
326  * Remove a BlockBackend from the list of backends referenced by the monitor.
327  * Strictly for use by blockdev.c.
328  */
329 void monitor_remove_blk(BlockBackend *blk)
330 {
331     if (!blk->name) {
332         return;
333     }
334 
335     QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
336     g_free(blk->name);
337     blk->name = NULL;
338 }
339 
340 /*
341  * Return @blk's name, a non-null string.
342  * Return an empty string iff @blk is not referenced by the monitor.
343  */
344 const char *blk_name(BlockBackend *blk)
345 {
346     return blk->name ?: "";
347 }
348 
349 /*
350  * Return the BlockBackend with name @name if it exists, else null.
351  * @name must not be null.
352  */
353 BlockBackend *blk_by_name(const char *name)
354 {
355     BlockBackend *blk = NULL;
356 
357     assert(name);
358     while ((blk = blk_next(blk)) != NULL) {
359         if (!strcmp(name, blk->name)) {
360             return blk;
361         }
362     }
363     return NULL;
364 }
365 
366 /*
367  * Return the BlockDriverState attached to @blk if any, else null.
368  */
369 BlockDriverState *blk_bs(BlockBackend *blk)
370 {
371     return blk->root ? blk->root->bs : NULL;
372 }
373 
374 /*
375  * Return @blk's DriveInfo if any, else null.
376  */
377 DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
378 {
379     return blk->legacy_dinfo;
380 }
381 
382 /*
383  * Set @blk's DriveInfo to @dinfo, and return it.
384  * @blk must not have a DriveInfo set already.
385  * No other BlockBackend may have the same DriveInfo set.
386  */
387 DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
388 {
389     assert(!blk->legacy_dinfo);
390     return blk->legacy_dinfo = dinfo;
391 }
392 
393 /*
394  * Return the BlockBackend with DriveInfo @dinfo.
395  * It must exist.
396  */
397 BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
398 {
399     BlockBackend *blk = NULL;
400 
401     while ((blk = blk_next(blk)) != NULL) {
402         if (blk->legacy_dinfo == dinfo) {
403             return blk;
404         }
405     }
406     abort();
407 }
408 
409 /*
410  * Disassociates the currently associated BlockDriverState from @blk.
411  */
412 void blk_remove_bs(BlockBackend *blk)
413 {
414     assert(blk->root->bs->blk == blk);
415 
416     notifier_list_notify(&blk->remove_bs_notifiers, blk);
417 
418     blk_update_root_state(blk);
419 
420     blk->root->bs->blk = NULL;
421     bdrv_root_unref_child(blk->root);
422     blk->root = NULL;
423 }
424 
425 /*
426  * Associates a new BlockDriverState with @blk.
427  */
428 void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
429 {
430     assert(!blk->root && !bs->blk);
431     bdrv_ref(bs);
432     blk->root = bdrv_root_attach_child(bs, "root", &child_root);
433     bs->blk = blk;
434 
435     notifier_list_notify(&blk->insert_bs_notifiers, blk);
436 }
437 
438 /*
439  * Attach device model @dev to @blk.
440  * Return 0 on success, -EBUSY when a device model is attached already.
441  */
442 int blk_attach_dev(BlockBackend *blk, void *dev)
443 /* TODO change to DeviceState *dev when all users are qdevified */
444 {
445     if (blk->dev) {
446         return -EBUSY;
447     }
448     blk_ref(blk);
449     blk->dev = dev;
450     blk_iostatus_reset(blk);
451     return 0;
452 }
453 
454 /*
455  * Attach device model @dev to @blk.
456  * @blk must not have a device model attached already.
457  * TODO qdevified devices don't use this, remove when devices are qdevified
458  */
459 void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
460 {
461     if (blk_attach_dev(blk, dev) < 0) {
462         abort();
463     }
464 }
465 
466 /*
467  * Detach device model @dev from @blk.
468  * @dev must be currently attached to @blk.
469  */
470 void blk_detach_dev(BlockBackend *blk, void *dev)
471 /* TODO change to DeviceState *dev when all users are qdevified */
472 {
473     assert(blk->dev == dev);
474     blk->dev = NULL;
475     blk->dev_ops = NULL;
476     blk->dev_opaque = NULL;
477     blk->guest_block_size = 512;
478     blk_unref(blk);
479 }
480 
481 /*
482  * Return the device model attached to @blk if any, else null.
483  */
484 void *blk_get_attached_dev(BlockBackend *blk)
485 /* TODO change to return DeviceState * when all users are qdevified */
486 {
487     return blk->dev;
488 }
489 
490 /*
491  * Set @blk's device model callbacks to @ops.
492  * @opaque is the opaque argument to pass to the callbacks.
493  * This is for use by device models.
494  */
495 void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
496                      void *opaque)
497 {
498     blk->dev_ops = ops;
499     blk->dev_opaque = opaque;
500 }
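
/*
 * Illustrative sketch of a device model hooking itself up; the MyDevice
 * type and the my_dev_* callbacks are hypothetical.  The member names match
 * the BlockDevOps callbacks consulted elsewhere in this file:
 *
 *     static void my_dev_change_media_cb(void *opaque, bool load)
 *     {
 *         MyDevice *s = opaque;
 *         ...
 *     }
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb = my_dev_change_media_cb,
 *         .is_tray_open    = my_dev_is_tray_open,
 *         .resize_cb       = my_dev_resize_cb,
 *     };
 *
 *     blk_attach_dev_nofail(blk, s);
 *     blk_set_dev_ops(blk, &my_dev_block_ops, s);
 */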
501 
502 /*
503  * Notify @blk's attached device model of media change.
504  * If @load is true, notify of media load.
505  * Else, notify of media eject.
506  * Also send DEVICE_TRAY_MOVED events as appropriate.
507  */
508 void blk_dev_change_media_cb(BlockBackend *blk, bool load)
509 {
510     if (blk->dev_ops && blk->dev_ops->change_media_cb) {
511         bool tray_was_open, tray_is_open;
512 
513         tray_was_open = blk_dev_is_tray_open(blk);
514         blk->dev_ops->change_media_cb(blk->dev_opaque, load);
515         tray_is_open = blk_dev_is_tray_open(blk);
516 
517         if (tray_was_open != tray_is_open) {
518             qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
519                                               &error_abort);
520         }
521     }
522 }
523 
524 /*
525  * Does @blk's attached device model have removable media?
526  * %true if no device model is attached.
527  */
528 bool blk_dev_has_removable_media(BlockBackend *blk)
529 {
530     return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
531 }
532 
533 /*
534  * Does @blk's attached device model have a tray?
535  */
536 bool blk_dev_has_tray(BlockBackend *blk)
537 {
538     return blk->dev_ops && blk->dev_ops->is_tray_open;
539 }
540 
541 /*
542  * Notify @blk's attached device model of a media eject request.
543  * If @force is true, the medium is about to be yanked out forcefully.
544  */
545 void blk_dev_eject_request(BlockBackend *blk, bool force)
546 {
547     if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
548         blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
549     }
550 }
551 
552 /*
553  * Does @blk's attached device model have a tray, and is it open?
554  */
555 bool blk_dev_is_tray_open(BlockBackend *blk)
556 {
557     if (blk_dev_has_tray(blk)) {
558         return blk->dev_ops->is_tray_open(blk->dev_opaque);
559     }
560     return false;
561 }
562 
563 /*
564  * Does @blk's attached device model have the medium locked?
565  * %false if the device model has no such lock.
566  */
567 bool blk_dev_is_medium_locked(BlockBackend *blk)
568 {
569     if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
570         return blk->dev_ops->is_medium_locked(blk->dev_opaque);
571     }
572     return false;
573 }
574 
575 /*
576  * Notify @blk's attached device model of a backend size change.
577  */
578 void blk_dev_resize_cb(BlockBackend *blk)
579 {
580     if (blk->dev_ops && blk->dev_ops->resize_cb) {
581         blk->dev_ops->resize_cb(blk->dev_opaque);
582     }
583 }
584 
585 void blk_iostatus_enable(BlockBackend *blk)
586 {
587     blk->iostatus_enabled = true;
588     blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
589 }
590 
591 /* The I/O status is only enabled if the drive explicitly
592  * enables it _and_ the VM is configured to stop on errors */
593 bool blk_iostatus_is_enabled(const BlockBackend *blk)
594 {
595     return (blk->iostatus_enabled &&
596            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
597             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
598             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
599 }
600 
601 BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
602 {
603     return blk->iostatus;
604 }
605 
606 void blk_iostatus_disable(BlockBackend *blk)
607 {
608     blk->iostatus_enabled = false;
609 }
610 
611 void blk_iostatus_reset(BlockBackend *blk)
612 {
613     if (blk_iostatus_is_enabled(blk)) {
614         BlockDriverState *bs = blk_bs(blk);
615         blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
616         if (bs && bs->job) {
617             block_job_iostatus_reset(bs->job);
618         }
619     }
620 }
621 
622 void blk_iostatus_set_err(BlockBackend *blk, int error)
623 {
624     assert(blk_iostatus_is_enabled(blk));
625     if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
626         blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
627                                           BLOCK_DEVICE_IO_STATUS_FAILED;
628     }
629 }
630 
631 void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
632 {
633     blk->allow_write_beyond_eof = allow;
634 }
635 
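/*
 * Check whether a byte-granularity request is valid for @blk.  Return 0 if
 * it may be submitted, -ENOMEDIUM if the backend is not available (no
 * medium inserted or the tray is open), and -EIO if @offset is negative,
 * @size exceeds INT_MAX, or the range reaches beyond the end of the device
 * while writes beyond EOF are not allowed.
 */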
636 static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
637                                   size_t size)
638 {
639     int64_t len;
640 
641     if (size > INT_MAX) {
642         return -EIO;
643     }
644 
645     if (!blk_is_available(blk)) {
646         return -ENOMEDIUM;
647     }
648 
649     if (offset < 0) {
650         return -EIO;
651     }
652 
653     if (!blk->allow_write_beyond_eof) {
654         len = blk_getlength(blk);
655         if (len < 0) {
656             return len;
657         }
658 
659         if (offset > len || len - offset < size) {
660             return -EIO;
661         }
662     }
663 
664     return 0;
665 }
666 
667 static int blk_check_request(BlockBackend *blk, int64_t sector_num,
668                              int nb_sectors)
669 {
670     if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
671         return -EIO;
672     }
673 
674     if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
675         return -EIO;
676     }
677 
678     return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
679                                   nb_sectors * BDRV_SECTOR_SIZE);
680 }
681 
682 static int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
683                                       unsigned int bytes, QEMUIOVector *qiov,
684                                       BdrvRequestFlags flags)
685 {
686     int ret = blk_check_byte_request(blk, offset, bytes);
687     if (ret < 0) {
688         return ret;
689     }
690 
691     return bdrv_co_do_preadv(blk_bs(blk), offset, bytes, qiov, flags);
692 }
693 
694 static int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
695                                       unsigned int bytes, QEMUIOVector *qiov,
696                                       BdrvRequestFlags flags)
697 {
698     int ret = blk_check_byte_request(blk, offset, bytes);
699     if (ret < 0) {
700         return ret;
701     }
702 
703     return bdrv_co_do_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
704 }
705 
706 typedef struct BlkRwCo {
707     BlockBackend *blk;
708     int64_t offset;
709     QEMUIOVector *qiov;
710     int ret;
711     BdrvRequestFlags flags;
712 } BlkRwCo;
713 
714 static void blk_read_entry(void *opaque)
715 {
716     BlkRwCo *rwco = opaque;
717 
718     rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
719                               rwco->qiov, rwco->flags);
720 }
721 
722 static void blk_write_entry(void *opaque)
723 {
724     BlkRwCo *rwco = opaque;
725 
726     rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
727                                rwco->qiov, rwco->flags);
728 }
729 
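/*
 * Emulate a synchronous request: run @co_entry in a coroutine and poll the
 * BlockBackend's AioContext until the coroutine has stored a result other
 * than NOT_DONE in rwco.ret.
 */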
730 static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
731                    int64_t bytes, CoroutineEntry co_entry,
732                    BdrvRequestFlags flags)
733 {
734     AioContext *aio_context;
735     QEMUIOVector qiov;
736     struct iovec iov;
737     Coroutine *co;
738     BlkRwCo rwco;
739 
740     iov = (struct iovec) {
741         .iov_base = buf,
742         .iov_len = bytes,
743     };
744     qemu_iovec_init_external(&qiov, &iov, 1);
745 
746     rwco = (BlkRwCo) {
747         .blk    = blk,
748         .offset = offset,
749         .qiov   = &qiov,
750         .flags  = flags,
751         .ret    = NOT_DONE,
752     };
753 
754     co = qemu_coroutine_create(co_entry);
755     qemu_coroutine_enter(co, &rwco);
756 
757     aio_context = blk_get_aio_context(blk);
758     while (rwco.ret == NOT_DONE) {
759         aio_poll(aio_context, true);
760     }
761 
762     return rwco.ret;
763 }
764 
765 static int blk_rw(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
766                   int nb_sectors, CoroutineEntry co_entry,
767                   BdrvRequestFlags flags)
768 {
769     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
770         return -EINVAL;
771     }
772 
773     return blk_prw(blk, sector_num << BDRV_SECTOR_BITS, buf,
774                    nb_sectors << BDRV_SECTOR_BITS, co_entry, flags);
775 }
776 
777 int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
778              int nb_sectors)
779 {
780     return blk_rw(blk, sector_num, buf, nb_sectors, blk_read_entry, 0);
781 }
782 
783 int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
784                          int nb_sectors)
785 {
786     BlockDriverState *bs = blk_bs(blk);
787     bool enabled;
788     int ret;
789 
790     ret = blk_check_request(blk, sector_num, nb_sectors);
791     if (ret < 0) {
792         return ret;
793     }
794 
795     enabled = bs->io_limits_enabled;
796     bs->io_limits_enabled = false;
797     ret = blk_read(blk, sector_num, buf, nb_sectors);
798     bs->io_limits_enabled = enabled;
799     return ret;
800 }
801 
802 int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
803               int nb_sectors)
804 {
805     return blk_rw(blk, sector_num, (uint8_t*) buf, nb_sectors,
806                   blk_write_entry, 0);
807 }
808 
809 int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
810                      int nb_sectors, BdrvRequestFlags flags)
811 {
812     return blk_rw(blk, sector_num, NULL, nb_sectors, blk_write_entry,
813                   flags | BDRV_REQ_ZERO_WRITE);
814 }
815 
816 static void error_callback_bh(void *opaque)
817 {
818     struct BlockBackendAIOCB *acb = opaque;
819     qemu_bh_delete(acb->bh);
820     acb->common.cb(acb->common.opaque, acb->ret);
821     qemu_aio_unref(acb);
822 }
823 
824 BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
825                                   BlockCompletionFunc *cb,
826                                   void *opaque, int ret)
827 {
828     struct BlockBackendAIOCB *acb;
829     QEMUBH *bh;
830 
831     acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
832     acb->blk = blk;
833     acb->ret = ret;
834 
835     bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
836     acb->bh = bh;
837     qemu_bh_schedule(bh);
838 
839     return &acb->common;
840 }
841 
842 typedef struct BlkAioEmAIOCB {
843     BlockAIOCB common;
844     BlkRwCo rwco;
845     bool has_returned;
846     QEMUBH* bh;
847 } BlkAioEmAIOCB;
848 
849 static const AIOCBInfo blk_aio_em_aiocb_info = {
850     .aiocb_size         = sizeof(BlkAioEmAIOCB),
851 };
852 
853 static void blk_aio_complete(BlkAioEmAIOCB *acb)
854 {
855     if (acb->bh) {
856         assert(acb->has_returned);
857         qemu_bh_delete(acb->bh);
858     }
859     if (acb->has_returned) {
860         acb->common.cb(acb->common.opaque, acb->rwco.ret);
861         qemu_aio_unref(acb);
862     }
863 }
864 
865 static void blk_aio_complete_bh(void *opaque)
866 {
867     blk_aio_complete(opaque);
868 }
869 
870 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
871                                 QEMUIOVector *qiov, CoroutineEntry co_entry,
872                                 BdrvRequestFlags flags,
873                                 BlockCompletionFunc *cb, void *opaque)
874 {
875     BlkAioEmAIOCB *acb;
876     Coroutine *co;
877 
878     acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
879     acb->rwco = (BlkRwCo) {
880         .blk    = blk,
881         .offset = offset,
882         .qiov   = qiov,
883         .flags  = flags,
884         .ret    = NOT_DONE,
885     };
886     acb->bh = NULL;
887     acb->has_returned = false;
888 
889     co = qemu_coroutine_create(co_entry);
890     qemu_coroutine_enter(co, acb);
891 
892     acb->has_returned = true;
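    /* The coroutine may have finished already, inside qemu_coroutine_enter();
     * in that case blk_aio_complete() did nothing because has_returned was
     * still false, so schedule a bottom half here to deliver the completion
     * callback outside of this call chain. */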
893     if (acb->rwco.ret != NOT_DONE) {
894         acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh, acb);
895         qemu_bh_schedule(acb->bh);
896     }
897 
898     return &acb->common;
899 }
900 
901 static void blk_aio_read_entry(void *opaque)
902 {
903     BlkAioEmAIOCB *acb = opaque;
904     BlkRwCo *rwco = &acb->rwco;
905 
906     rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
907                               rwco->qiov, rwco->flags);
908     blk_aio_complete(acb);
909 }
910 
911 static void blk_aio_write_entry(void *opaque)
912 {
913     BlkAioEmAIOCB *acb = opaque;
914     BlkRwCo *rwco = &acb->rwco;
915 
916     rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset,
917                                rwco->qiov ? rwco->qiov->size : 0,
918                                rwco->qiov, rwco->flags);
919     blk_aio_complete(acb);
920 }
921 
922 BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
923                                  int nb_sectors, BdrvRequestFlags flags,
924                                  BlockCompletionFunc *cb, void *opaque)
925 {
926     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
927         return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
928     }
929 
930     return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, NULL,
931                         blk_aio_write_entry, flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
932 }
933 
934 int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
935 {
936     int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
937     if (ret < 0) {
938         return ret;
939     }
940     return count;
941 }
942 
943 int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
944 {
945     int ret = blk_prw(blk, offset, (void*) buf, count, blk_write_entry, 0);
946     if (ret < 0) {
947         return ret;
948     }
949     return count;
950 }
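
/*
 * Illustrative sketch of synchronous byte-based access; the 512-byte header
 * is a hypothetical example.  Both helpers return the number of bytes
 * transferred on success and a negative errno value on failure:
 *
 *     uint8_t header[512];
 *     int ret = blk_pread(blk, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */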
951 
952 int64_t blk_getlength(BlockBackend *blk)
953 {
954     if (!blk_is_available(blk)) {
955         return -ENOMEDIUM;
956     }
957 
958     return bdrv_getlength(blk_bs(blk));
959 }
960 
961 void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
962 {
963     if (!blk_bs(blk)) {
964         *nb_sectors_ptr = 0;
965     } else {
966         bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
967     }
968 }
969 
970 int64_t blk_nb_sectors(BlockBackend *blk)
971 {
972     if (!blk_is_available(blk)) {
973         return -ENOMEDIUM;
974     }
975 
976     return bdrv_nb_sectors(blk_bs(blk));
977 }
978 
979 BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
980                           QEMUIOVector *iov, int nb_sectors,
981                           BlockCompletionFunc *cb, void *opaque)
982 {
983     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
984         return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
985     }
986 
987     return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov,
988                         blk_aio_read_entry, 0, cb, opaque);
989 }
990 
991 BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
992                            QEMUIOVector *iov, int nb_sectors,
993                            BlockCompletionFunc *cb, void *opaque)
994 {
995     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
996         return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
997     }
998 
999     return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov,
1000                         blk_aio_write_entry, 0, cb, opaque);
1001 }
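
/*
 * Illustrative sketch of an asynchronous read; MyRequest and my_read_done
 * are hypothetical.  The completion callback receives the caller's opaque
 * pointer and the request's return value (negative errno on failure):
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         MyRequest *req = opaque;
 *         ...
 *     }
 *
 *     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
 *     blk_aio_readv(blk, sector_num, &req->qiov, nb_sectors,
 *                   my_read_done, req);
 */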
1002 
1003 BlockAIOCB *blk_aio_flush(BlockBackend *blk,
1004                           BlockCompletionFunc *cb, void *opaque)
1005 {
1006     if (!blk_is_available(blk)) {
1007         return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
1008     }
1009 
1010     return bdrv_aio_flush(blk_bs(blk), cb, opaque);
1011 }
1012 
1013 BlockAIOCB *blk_aio_discard(BlockBackend *blk,
1014                             int64_t sector_num, int nb_sectors,
1015                             BlockCompletionFunc *cb, void *opaque)
1016 {
1017     int ret = blk_check_request(blk, sector_num, nb_sectors);
1018     if (ret < 0) {
1019         return blk_abort_aio_request(blk, cb, opaque, ret);
1020     }
1021 
1022     return bdrv_aio_discard(blk_bs(blk), sector_num, nb_sectors, cb, opaque);
1023 }
1024 
1025 void blk_aio_cancel(BlockAIOCB *acb)
1026 {
1027     bdrv_aio_cancel(acb);
1028 }
1029 
1030 void blk_aio_cancel_async(BlockAIOCB *acb)
1031 {
1032     bdrv_aio_cancel_async(acb);
1033 }
1034 
1035 int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
1036 {
1037     int i, ret;
1038 
1039     for (i = 0; i < num_reqs; i++) {
1040         ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
1041         if (ret < 0) {
1042             return ret;
1043         }
1044     }
1045 
1046     return bdrv_aio_multiwrite(blk_bs(blk), reqs, num_reqs);
1047 }
1048 
1049 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
1050 {
1051     if (!blk_is_available(blk)) {
1052         return -ENOMEDIUM;
1053     }
1054 
1055     return bdrv_ioctl(blk_bs(blk), req, buf);
1056 }
1057 
1058 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
1059                           BlockCompletionFunc *cb, void *opaque)
1060 {
1061     if (!blk_is_available(blk)) {
1062         return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
1063     }
1064 
1065     return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
1066 }
1067 
1068 int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
1069 {
1070     int ret = blk_check_request(blk, sector_num, nb_sectors);
1071     if (ret < 0) {
1072         return ret;
1073     }
1074 
1075     return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
1076 }
1077 
1078 int blk_co_flush(BlockBackend *blk)
1079 {
1080     if (!blk_is_available(blk)) {
1081         return -ENOMEDIUM;
1082     }
1083 
1084     return bdrv_co_flush(blk_bs(blk));
1085 }
1086 
1087 int blk_flush(BlockBackend *blk)
1088 {
1089     if (!blk_is_available(blk)) {
1090         return -ENOMEDIUM;
1091     }
1092 
1093     return bdrv_flush(blk_bs(blk));
1094 }
1095 
1096 void blk_drain(BlockBackend *blk)
1097 {
1098     if (blk_bs(blk)) {
1099         bdrv_drain(blk_bs(blk));
1100     }
1101 }
1102 
1103 void blk_drain_all(void)
1104 {
1105     bdrv_drain_all();
1106 }
1107 
1108 void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
1109                       BlockdevOnError on_write_error)
1110 {
1111     blk->on_read_error = on_read_error;
1112     blk->on_write_error = on_write_error;
1113 }
1114 
1115 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
1116 {
1117     return is_read ? blk->on_read_error : blk->on_write_error;
1118 }
1119 
1120 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
1121                                       int error)
1122 {
1123     BlockdevOnError on_err = blk_get_on_error(blk, is_read);
1124 
1125     switch (on_err) {
1126     case BLOCKDEV_ON_ERROR_ENOSPC:
1127         return (error == ENOSPC) ?
1128                BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
1129     case BLOCKDEV_ON_ERROR_STOP:
1130         return BLOCK_ERROR_ACTION_STOP;
1131     case BLOCKDEV_ON_ERROR_REPORT:
1132         return BLOCK_ERROR_ACTION_REPORT;
1133     case BLOCKDEV_ON_ERROR_IGNORE:
1134         return BLOCK_ERROR_ACTION_IGNORE;
1135     default:
1136         abort();
1137     }
1138 }
1139 
1140 static void send_qmp_error_event(BlockBackend *blk,
1141                                  BlockErrorAction action,
1142                                  bool is_read, int error)
1143 {
1144     IoOperationType optype;
1145 
1146     optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
1147     qapi_event_send_block_io_error(blk_name(blk), optype, action,
1148                                    blk_iostatus_is_enabled(blk),
1149                                    error == ENOSPC, strerror(error),
1150                                    &error_abort);
1151 }
1152 
1153 /* This is done by device models because, while the block layer knows
1154  * about the error, it does not know whether an operation comes from
1155  * the device or the block layer (from a job, for example).
1156  */
1157 void blk_error_action(BlockBackend *blk, BlockErrorAction action,
1158                       bool is_read, int error)
1159 {
1160     assert(error >= 0);
1161 
1162     if (action == BLOCK_ERROR_ACTION_STOP) {
1163         /* First set the iostatus, so that "info block" returns an iostatus
1164          * that matches the events raised so far (an additional error iostatus
1165          * is fine, but not a lost one).
1166          */
1167         blk_iostatus_set_err(blk, error);
1168 
1169         /* Then raise the request to stop the VM and the event.
1170          * qemu_system_vmstop_request_prepare has two effects.  First,
1171          * it ensures that the STOP event always comes after the
1172          * BLOCK_IO_ERROR event.  Second, it ensures that even if management
1173          * can observe the STOP event and do a "cont" before the STOP
1174          * event is issued, the VM will not stop.  In this case, vm_start()
1175          * also ensures that the STOP/RESUME pair of events is emitted.
1176          */
1177         qemu_system_vmstop_request_prepare();
1178         send_qmp_error_event(blk, action, is_read, error);
1179         qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
1180     } else {
1181         send_qmp_error_event(blk, action, is_read, error);
1182     }
1183 }
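
/*
 * Illustrative sketch of how a device model typically reacts to a failed
 * request; the requeue and completion steps are device-specific:
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... queue the request so it can be retried after "cont" ...
 *     }
 *     blk_error_action(blk, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         ... complete the request with an error ...
 *     }
 */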
1184 
1185 int blk_is_read_only(BlockBackend *blk)
1186 {
1187     BlockDriverState *bs = blk_bs(blk);
1188 
1189     if (bs) {
1190         return bdrv_is_read_only(bs);
1191     } else {
1192         return blk->root_state.read_only;
1193     }
1194 }
1195 
1196 int blk_is_sg(BlockBackend *blk)
1197 {
1198     BlockDriverState *bs = blk_bs(blk);
1199 
1200     if (!bs) {
1201         return 0;
1202     }
1203 
1204     return bdrv_is_sg(bs);
1205 }
1206 
1207 int blk_enable_write_cache(BlockBackend *blk)
1208 {
1209     BlockDriverState *bs = blk_bs(blk);
1210 
1211     if (bs) {
1212         return bdrv_enable_write_cache(bs);
1213     } else {
1214         return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
1215     }
1216 }
1217 
1218 void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
1219 {
1220     BlockDriverState *bs = blk_bs(blk);
1221 
1222     if (bs) {
1223         bdrv_set_enable_write_cache(bs, wce);
1224     } else {
1225         if (wce) {
1226             blk->root_state.open_flags |= BDRV_O_CACHE_WB;
1227         } else {
1228             blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
1229         }
1230     }
1231 }
1232 
1233 void blk_invalidate_cache(BlockBackend *blk, Error **errp)
1234 {
1235     BlockDriverState *bs = blk_bs(blk);
1236 
1237     if (!bs) {
1238         error_setg(errp, "Device '%s' has no medium", blk->name);
1239         return;
1240     }
1241 
1242     bdrv_invalidate_cache(bs, errp);
1243 }
1244 
1245 bool blk_is_inserted(BlockBackend *blk)
1246 {
1247     BlockDriverState *bs = blk_bs(blk);
1248 
1249     return bs && bdrv_is_inserted(bs);
1250 }
1251 
1252 bool blk_is_available(BlockBackend *blk)
1253 {
1254     return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
1255 }
1256 
1257 void blk_lock_medium(BlockBackend *blk, bool locked)
1258 {
1259     BlockDriverState *bs = blk_bs(blk);
1260 
1261     if (bs) {
1262         bdrv_lock_medium(bs, locked);
1263     }
1264 }
1265 
1266 void blk_eject(BlockBackend *blk, bool eject_flag)
1267 {
1268     BlockDriverState *bs = blk_bs(blk);
1269 
1270     if (bs) {
1271         bdrv_eject(bs, eject_flag);
1272     }
1273 }
1274 
1275 int blk_get_flags(BlockBackend *blk)
1276 {
1277     BlockDriverState *bs = blk_bs(blk);
1278 
1279     if (bs) {
1280         return bdrv_get_flags(bs);
1281     } else {
1282         return blk->root_state.open_flags;
1283     }
1284 }
1285 
1286 int blk_get_max_transfer_length(BlockBackend *blk)
1287 {
1288     BlockDriverState *bs = blk_bs(blk);
1289 
1290     if (bs) {
1291         return bs->bl.max_transfer_length;
1292     } else {
1293         return 0;
1294     }
1295 }
1296 
1297 int blk_get_max_iov(BlockBackend *blk)
1298 {
1299     return blk->root->bs->bl.max_iov;
1300 }
1301 
1302 void blk_set_guest_block_size(BlockBackend *blk, int align)
1303 {
1304     blk->guest_block_size = align;
1305 }
1306 
1307 void *blk_try_blockalign(BlockBackend *blk, size_t size)
1308 {
1309     return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
1310 }
1311 
1312 void *blk_blockalign(BlockBackend *blk, size_t size)
1313 {
1314     return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
1315 }
1316 
1317 bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
1318 {
1319     BlockDriverState *bs = blk_bs(blk);
1320 
1321     if (!bs) {
1322         return false;
1323     }
1324 
1325     return bdrv_op_is_blocked(bs, op, errp);
1326 }
1327 
1328 void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
1329 {
1330     BlockDriverState *bs = blk_bs(blk);
1331 
1332     if (bs) {
1333         bdrv_op_unblock(bs, op, reason);
1334     }
1335 }
1336 
1337 void blk_op_block_all(BlockBackend *blk, Error *reason)
1338 {
1339     BlockDriverState *bs = blk_bs(blk);
1340 
1341     if (bs) {
1342         bdrv_op_block_all(bs, reason);
1343     }
1344 }
1345 
1346 void blk_op_unblock_all(BlockBackend *blk, Error *reason)
1347 {
1348     BlockDriverState *bs = blk_bs(blk);
1349 
1350     if (bs) {
1351         bdrv_op_unblock_all(bs, reason);
1352     }
1353 }
1354 
1355 AioContext *blk_get_aio_context(BlockBackend *blk)
1356 {
1357     BlockDriverState *bs = blk_bs(blk);
1358 
1359     if (bs) {
1360         return bdrv_get_aio_context(bs);
1361     } else {
1362         return qemu_get_aio_context();
1363     }
1364 }
1365 
1366 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
1367 {
1368     BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
1369     return blk_get_aio_context(blk_acb->blk);
1370 }
1371 
1372 void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
1373 {
1374     BlockDriverState *bs = blk_bs(blk);
1375 
1376     if (bs) {
1377         bdrv_set_aio_context(bs, new_context);
1378     }
1379 }
1380 
1381 void blk_add_aio_context_notifier(BlockBackend *blk,
1382         void (*attached_aio_context)(AioContext *new_context, void *opaque),
1383         void (*detach_aio_context)(void *opaque), void *opaque)
1384 {
1385     BlockDriverState *bs = blk_bs(blk);
1386 
1387     if (bs) {
1388         bdrv_add_aio_context_notifier(bs, attached_aio_context,
1389                                       detach_aio_context, opaque);
1390     }
1391 }
1392 
1393 void blk_remove_aio_context_notifier(BlockBackend *blk,
1394                                      void (*attached_aio_context)(AioContext *,
1395                                                                   void *),
1396                                      void (*detach_aio_context)(void *),
1397                                      void *opaque)
1398 {
1399     BlockDriverState *bs = blk_bs(blk);
1400 
1401     if (bs) {
1402         bdrv_remove_aio_context_notifier(bs, attached_aio_context,
1403                                          detach_aio_context, opaque);
1404     }
1405 }
1406 
1407 void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
1408 {
1409     notifier_list_add(&blk->remove_bs_notifiers, notify);
1410 }
1411 
1412 void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
1413 {
1414     notifier_list_add(&blk->insert_bs_notifiers, notify);
1415 }
1416 
1417 void blk_io_plug(BlockBackend *blk)
1418 {
1419     BlockDriverState *bs = blk_bs(blk);
1420 
1421     if (bs) {
1422         bdrv_io_plug(bs);
1423     }
1424 }
1425 
1426 void blk_io_unplug(BlockBackend *blk)
1427 {
1428     BlockDriverState *bs = blk_bs(blk);
1429 
1430     if (bs) {
1431         bdrv_io_unplug(bs);
1432     }
1433 }
1434 
1435 BlockAcctStats *blk_get_stats(BlockBackend *blk)
1436 {
1437     return &blk->stats;
1438 }
1439 
1440 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
1441                   BlockCompletionFunc *cb, void *opaque)
1442 {
1443     return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
1444 }
1445 
1446 int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
1447                                      int nb_sectors, BdrvRequestFlags flags)
1448 {
1449     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1450         return -EINVAL;
1451     }
1452 
1453     return blk_co_pwritev(blk, sector_num << BDRV_SECTOR_BITS,
1454                           nb_sectors << BDRV_SECTOR_BITS, NULL,
1455                           flags | BDRV_REQ_ZERO_WRITE);
1456 }
1457 
1458 int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
1459                          const uint8_t *buf, int nb_sectors)
1460 {
1461     int ret = blk_check_request(blk, sector_num, nb_sectors);
1462     if (ret < 0) {
1463         return ret;
1464     }
1465 
1466     return bdrv_write_compressed(blk_bs(blk), sector_num, buf, nb_sectors);
1467 }
1468 
1469 int blk_truncate(BlockBackend *blk, int64_t offset)
1470 {
1471     if (!blk_is_available(blk)) {
1472         return -ENOMEDIUM;
1473     }
1474 
1475     return bdrv_truncate(blk_bs(blk), offset);
1476 }
1477 
1478 int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
1479 {
1480     int ret = blk_check_request(blk, sector_num, nb_sectors);
1481     if (ret < 0) {
1482         return ret;
1483     }
1484 
1485     return bdrv_discard(blk_bs(blk), sector_num, nb_sectors);
1486 }
1487 
1488 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
1489                      int64_t pos, int size)
1490 {
1491     if (!blk_is_available(blk)) {
1492         return -ENOMEDIUM;
1493     }
1494 
1495     return bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
1496 }
1497 
1498 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
1499 {
1500     if (!blk_is_available(blk)) {
1501         return -ENOMEDIUM;
1502     }
1503 
1504     return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
1505 }
1506 
1507 int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
1508 {
1509     if (!blk_is_available(blk)) {
1510         return -ENOMEDIUM;
1511     }
1512 
1513     return bdrv_probe_blocksizes(blk_bs(blk), bsz);
1514 }
1515 
1516 int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
1517 {
1518     if (!blk_is_available(blk)) {
1519         return -ENOMEDIUM;
1520     }
1521 
1522     return bdrv_probe_geometry(blk_bs(blk), geo);
1523 }
1524 
1525 /*
1526  * Updates the BlockBackendRootState object with data from the currently
1527  * attached BlockDriverState.
1528  */
1529 void blk_update_root_state(BlockBackend *blk)
1530 {
1531     assert(blk->root);
1532 
1533     blk->root_state.open_flags    = blk->root->bs->open_flags;
1534     blk->root_state.read_only     = blk->root->bs->read_only;
1535     blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
1536 
1537     if (blk->root_state.throttle_group) {
1538         g_free(blk->root_state.throttle_group);
1539         throttle_group_unref(blk->root_state.throttle_state);
1540     }
1541     if (blk->root->bs->throttle_state) {
1542         const char *name = throttle_group_get_name(blk->root->bs);
1543         blk->root_state.throttle_group = g_strdup(name);
1544         blk->root_state.throttle_state = throttle_group_incref(name);
1545     } else {
1546         blk->root_state.throttle_group = NULL;
1547         blk->root_state.throttle_state = NULL;
1548     }
1549 }
1550 
1551 /*
1552  * Applies the information in the root state to the given BlockDriverState.
1553  * This does not include the flags that have to be specified for bdrv_open();
1554  * use blk_get_open_flags_from_root_state() to query them.
1555  */
1556 void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
1557 {
1558     bs->detect_zeroes = blk->root_state.detect_zeroes;
1559     if (blk->root_state.throttle_group) {
1560         bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
1561     }
1562 }
1563 
1564 /*
1565  * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
1566  * supposed to inherit the root state.
1567  */
1568 int blk_get_open_flags_from_root_state(BlockBackend *blk)
1569 {
1570     int bs_flags;
1571 
1572     bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
1573     bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;
1574 
1575     return bs_flags;
1576 }
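
/*
 * Illustrative sketch of the intended round trip: blk_remove_bs() saves the
 * root state via blk_update_root_state(), and a replacement BDS is later
 * opened with the saved flags and has the remaining options applied.  The
 * bdrv_open() arguments are hypothetical:
 *
 *     blk_remove_bs(blk);
 *     ...
 *     bs = NULL;
 *     bdrv_open(&bs, filename, NULL, options,
 *               blk_get_open_flags_from_root_state(blk), errp);
 *     blk_insert_bs(blk, bs);
 *     blk_apply_root_state(blk, bs);
 */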
1577 
1578 BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
1579 {
1580     return &blk->root_state;
1581 }
1582 
1583 int blk_commit_all(void)
1584 {
1585     BlockBackend *blk = NULL;
1586 
1587     while ((blk = blk_all_next(blk)) != NULL) {
1588         AioContext *aio_context = blk_get_aio_context(blk);
1589 
1590         aio_context_acquire(aio_context);
1591         if (blk_is_inserted(blk) && blk->root->bs->backing) {
1592             int ret = bdrv_commit(blk->root->bs);
1593             if (ret < 0) {
1594                 aio_context_release(aio_context);
1595                 return ret;
1596             }
1597         }
1598         aio_context_release(aio_context);
1599     }
1600     return 0;
1601 }
1602 
1603 int blk_flush_all(void)
1604 {
1605     BlockBackend *blk = NULL;
1606     int result = 0;
1607 
1608     while ((blk = blk_all_next(blk)) != NULL) {
1609         AioContext *aio_context = blk_get_aio_context(blk);
1610         int ret;
1611 
1612         aio_context_acquire(aio_context);
1613         if (blk_is_inserted(blk)) {
1614             ret = blk_flush(blk);
1615             if (ret < 0 && !result) {
1616                 result = ret;
1617             }
1618         }
1619         aio_context_release(aio_context);
1620     }
1621 
1622     return result;
1623 }
1624