xref: /openbmc/qemu/block/block-backend.c (revision 0430891c)
/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here so
     * they can be restored in a new BDS when one is inserted */
    BlockBackendRootState root_state;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

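/*
 * Hypothetical usage sketch (not part of the original file): open a
 * backend on an image file.  "drive0" and "disk.qcow2" are invented
 * names.  Note that @options is consumed even on failure, and that
 * blk_unref() drops the initial reference taken by blk_new().
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("drive0", "disk.qcow2", NULL,
 *                                      NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ...
 *     blk_unref(blk);
 */
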
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

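/*
 * Illustrative sketch only: a caller that must keep @blk alive across an
 * asynchronous operation takes its own reference.  do_something_async()
 * and done_cb() are invented names.
 *
 *     blk_ref(blk);
 *     do_something_async(blk, done_cb);
 *     ...
 *     in done_cb(): blk_unref(blk);  destroys @blk if this was the last ref
 */
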
/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument.  For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    blk_update_root_state(blk);

    blk->bs->blk = NULL;
    bdrv_unref(blk->bs);
    blk->bs = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->bs && !bs->blk);
    bdrv_ref(bs);
    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

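/*
 * Attach/detach sketch for a hypothetical device model instance @dev:
 *
 *     if (blk_attach_dev(blk, dev) < 0) {
 *         ... handle -EBUSY: another device model owns this backend ...
 *     }
 *     ...
 *     blk_detach_dev(blk, dev);   balances the blk_ref() taken on attach
 */
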
/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}

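/*
 * Sketch of a device model wiring up its callbacks; all "my_dev_*" names
 * are invented.  Only the callbacks the device cares about need to be
 * set; the dispatch helpers below check each pointer before calling it.
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb  = my_dev_change_media_cb,
 *         .eject_request_cb = my_dev_eject_request_cb,
 *         .is_tray_open     = my_dev_is_tray_open,
 *         .resize_cb        = my_dev_resize_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_block_ops, my_dev_state);
 */
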
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (blk->bs && blk->bs->job) {
            block_job_iostatus_reset(blk->bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

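/*
 * Worked example of the checks above: for a 1 MiB image (len == 1048576)
 * and offset == 1048064, len - offset == 512, so size == 512 passes while
 * size == 1024 fails with -EIO.  The bound is written as
 * "len - offset < size" rather than "offset + size > len" so that it
 * cannot overflow for offsets near INT64_MAX.
 */
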
int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

/*
 * Fail an AIO request with @ret without touching the BlockDriverState.
 * The completion callback is deferred to a bottom half so that it never
 * runs directly from the caller's context, matching the behavior of a
 * real asynchronous request.
 */
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

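/*
 * Byte-granularity I/O sketch (illustrative only; the buffer size is
 * arbitrary).  A negative return is -ENOMEDIUM, -EIO, or whatever
 * bdrv_pread() reports.
 *
 *     uint8_t buf[512];
 *     int ret = blk_pread(blk, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         ... handle the error ...
 *     }
 */
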
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk->bs) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk->bs, nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

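/*
 * Asynchronous read sketch; my_read_done, my_state, and qiov are invented
 * names.  Even when validation fails, the caller gets a BlockAIOCB back
 * and the callback fires later from a bottom half (see
 * blk_abort_aio_request() above).
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         ... ret is 0 on success, a negative errno on failure ...
 *     }
 *
 *     blk_aio_readv(blk, sector_num, &qiov, nb_sectors,
 *                   my_read_done, my_state);
 */
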
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_drain(blk->bs);
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

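/*
 * Typical device-model error path (sketch).  Note the sign convention:
 * blk_get_error_action() and blk_error_action() take a positive errno,
 * hence the negation of a negative return value @ret.
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *     blk_error_action(blk, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         ... complete the guest request with an error ...
 *     }
 */
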
int blk_is_read_only(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_is_read_only(blk->bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    if (!blk->bs) {
        return 0;
    }

    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_enable_write_cache(blk->bs);
    } else {
        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
    }
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    if (blk->bs) {
        bdrv_set_enable_write_cache(blk->bs, wce);
    } else {
        if (wce) {
            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
        } else {
            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
        }
    }
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    if (!blk->bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    if (blk->bs) {
        bdrv_lock_medium(blk->bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    if (blk->bs) {
        bdrv_eject(blk->bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_flags(blk->bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    if (blk->bs) {
        return blk->bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}

/* Unlike most getters here, this requires an attached BlockDriverState. */
int blk_get_max_iov(BlockBackend *blk)
{
    return blk->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk->bs : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    if (!blk->bs) {
        return false;
    }

    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock(blk->bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_block_all(blk->bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock_all(blk->bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    if (blk->bs) {
        bdrv_set_aio_context(blk->bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    if (blk->bs) {
        bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    if (blk->bs) {
        bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

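/*
 * Notifier sketch (callback names invented): a user that caches the
 * backend's AioContext refreshes it from the attach callback.  Removal
 * is matched on the same (callbacks, opaque) tuple, so both calls must
 * pass identical arguments.
 *
 *     blk_add_aio_context_notifier(blk, my_attached_cb, my_detach_cb, s);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attached_cb, my_detach_cb, s);
 */
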
void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    if (blk->bs) {
        bdrv_add_close_notifier(blk->bs, notify);
    }
}

void blk_io_plug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_plug(blk->bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_unplug(blk->bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk->bs, geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->bs);

    blk->root_state.open_flags    = blk->bs->open_flags;
    blk->root_state.read_only     = blk->bs->read_only;
    blk->root_state.detect_zeroes = blk->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}

/*
 * Applies the information in the root state to the given BlockDriverState.
 * This does not include the flags which have to be specified for
 * bdrv_open(); use blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
    }
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

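/*
 * Example of the computation above: for a read-only root state whose
 * stored open_flags are (BDRV_O_RDWR | BDRV_O_CACHE_WB), the result is
 * just BDRV_O_CACHE_WB; the stored BDRV_O_RDWR bit is masked out and
 * re-derived from root_state.read_only.
 */
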
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}