xref: /openbmc/qemu/block.c (revision 0eb7217e49b84553bb30f97bc34380633fd846fe)
/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
#include "sysemu/qtest.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

/**
 * A BdrvDirtyBitmap can be in three possible states:
 * (1) successor is NULL and disabled is false: full r/w mode
 * (2) successor is NULL and disabled is true: read only mode ("disabled")
 * (3) successor is set: frozen mode.
 *     A frozen bitmap cannot be renamed, deleted, anonymized, cleared, set,
 *     or enabled. A frozen bitmap can only abdicate() or reclaim().
 */
struct BdrvDirtyBitmap {
    HBitmap *bitmap;            /* Dirty sector bitmap implementation */
    BdrvDirtyBitmap *successor; /* Anonymous child; implies frozen status */
    char *name;                 /* Optional non-empty unique ID */
    int64_t size;               /* Size of the bitmap (Number of sectors) */
    bool disabled;              /* Bitmap is read-only */
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};
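
/*
 * Illustrative lifecycle sketch of the states above (the concrete helpers
 * live with the dirty-bitmap functions later in this file):
 *
 *     r/w     --(install anonymous successor)-->  frozen
 *     frozen  --abdicate()-->   successor takes over, old bitmap goes away
 *     frozen  --reclaim()-->    successor is merged back, r/w again
 *     r/w     --(disabled = true)-->              read-only ("disabled")
 */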

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

static void bdrv_dirty_bitmap_truncate(BlockDriverState *bs);
/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled IOs */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;
    }
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  clock_type,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

/* This function makes an IO wait if needed
 *
 * @bytes:    the number of bytes of the IO
 * @is_write: is the IO a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this IO have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is queued, queue the IO */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the IO will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

/* check if the path starts with "<protocol>:" */
int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
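
/*
 * Illustrative examples (sketch, assuming a POSIX host so the #else branch
 * above is compiled in):
 *
 *     path_has_protocol("nbd://localhost:10809")  -> 1  (scan stops at ':')
 *     path_has_protocol("/var/lib/images/a.img")  -> 0  (scan stops at '/')
 *     path_has_protocol("relative/name.img")      -> 0
 */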

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by treating it as relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
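
/*
 * Worked example (illustrative): combining a relative backing file name
 * with the path of the image that refers to it:
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/img/base.qcow2", "backing.qcow2");
 *     // dest now holds "/img/backing.qcow2"
 */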

void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp)
{
    if (backing[0] == '\0' || path_has_protocol(backing) ||
        path_is_absolute(backing))
    {
        pstrcpy(dest, sz, backing);
    } else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
        error_setg(errp, "Cannot use relative backing file names for '%s'",
                   backed);
    } else {
        path_combine(dest, sz, backed, backing);
    }
}

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
                                    Error **errp)
{
    char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;

    bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
                                                 dest, sz, errp);
}

void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    bdrv_setup_io_funcs(bdrv);

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

BlockDriverState *bdrv_new_root(void)
{
    BlockDriverState *bs = bdrv_new();

    QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    return bs;
}

BlockDriverState *bdrv_new(void)
{
    BlockDriverState *bs;
    int i;

    bs = g_new0(BlockDriverState, 1);
    QLIST_INIT(&bs->dirty_bitmaps);
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
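
/*
 * Usage sketch (illustrative; it mirrors the pattern used by
 * bdrv_append_temp_snapshot() further down in this file):
 *
 *     QemuOpts *opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
 *                                       &error_abort);
 *     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, 16 * 1024 * 1024,
 *                         &error_abort);
 *     ret = bdrv_create(&bdrv_qcow2, "/tmp/overlay.qcow2", opts, &err);
 *     qemu_opts_del(opts);
 */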

int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true, errp);
    if (drv == NULL) {
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * Try to get @bs's logical and physical block size.
 * On success, store them in @bsz struct and return 0.
 * On failure return -errno.
 * @bs must not be empty.
 */
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_probe_blocksizes) {
        return drv->bdrv_probe_blocksizes(bs, bsz);
    }

    return -ENOTSUP;
}

/**
 * Try to get @bs's geometry (cyls, heads, sectors).
 * On success, store them in @geo struct and return 0.
 * On failure return -errno.
 * @bs must not be empty.
 */
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_probe_geometry) {
        return drv->bdrv_probe_geometry(bs, geo);
    }

    return -ENOTSUP;
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return &bdrv_file;
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }

    error_setg(errp, "Unknown protocol '%s'", protocol);
    return NULL;
}

/*
 * Guess image format by probing its contents.
 * This is not a good idea when your image is raw (CVE-2008-2004), but
 * we do it anyway for backward compatibility.
 *
 * @buf         contains the image's first @buf_size bytes.
 * @buf_size    is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
 *              but can be smaller if the image file is smaller)
 * @filename    is its filename.
 *
 * For all block drivers, call the bdrv_probe() method to get its
 * probing score.
 * Return the first block driver with the highest probing score.
 */
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe) {
            score = d->bdrv_probe(buf, buf_size, filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    BlockDriver *drv;
    uint8_t buf[BLOCK_PROBE_BUF_SIZE];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        *pdrv = &bdrv_raw;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    drv = bdrv_probe_all(buf, ret, filename);
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 * Return 0 on success, -errno on error.
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
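
/*
 * Quick reference (illustrative, derived from the branches above):
 *
 *     "none" / "off"  -> BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *     "directsync"    -> BDRV_O_NOCACHE
 *     "writeback"     -> BDRV_O_CACHE_WB
 *     "unsafe"        -> BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 *     "writethrough"  -> no flags (the default)
 */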

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* Check for empty string or invalid characters */
    if (!id_wellformed(node_name)) {
        error_setg(errp, "Invalid node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (blk_by_name(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() was called with a protocol driver directly as drv. That
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename);

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    if (bs->encrypted) {
        error_report("Encrypted images are deprecated");
        error_printf("Support for them will be removed in a future release.\n"
                     "You can use 'qemu-img convert' to convert your image"
                     " to an unencrypted one.\n");
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}

/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                             "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename, errp);
                if (!drv) {
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}

void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{
    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "node is used as backing hd of '%s'",
                   bdrv_get_device_or_node_name(bs));
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling
 * bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
                                       &local_err);
        if (local_err) {
            ret = -EINVAL;
            error_propagate(errp, local_err);
            QDECREF(options);
            goto free_exit;
        }
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new();

    if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
        qdict_put(options, "driver", qstring_from_str(bs->backing_format));
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), NULL, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}
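
/*
 * Caller-side sketch (illustrative): because the options QDict reference
 * is consumed even on failure, a caller that wants to keep its dictionary
 * must take an extra reference first:
 *
 *     QINCREF(options);
 *     ret = bdrv_open_backing_file(bs, options, &err);
 *     // options is still valid here and must eventually be QDECREF'd
 */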

/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
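
/*
 * Illustrative example (sketch): with a flattened options QDict holding
 *
 *     { "file.driver": "file", "file.filename": "/tmp/test.img" }
 *
 * a call such as
 *
 *     ret = bdrv_open_image(&file, NULL, options, "file",
 *                           bdrv_inherited_flags(flags), true, &err);
 *
 * extracts every "file."-prefixed key into the new image's options, just
 * as bdrv_open() does for bs->file below.
 */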

int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err = NULL;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size, &error_abort);
    ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new();

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, &bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
    return ret;
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new();
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    bs->probed = !drv;
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    bdrv_refresh_filename(bs);

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bdrv_get_device_name(bs), entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        if (bs->blk) {
            blk_dev_change_media_cb(bs->blk, true);
        }
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 */
1708 BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1709                                     BlockDriverState *bs, int flags)
1710 {
1711     assert(bs != NULL);
1712 
1713     BlockReopenQueueEntry *bs_entry;
1714     if (bs_queue == NULL) {
1715         bs_queue = g_new0(BlockReopenQueue, 1);
1716         QSIMPLEQ_INIT(bs_queue);
1717     }
1718 
1719     /* bdrv_open() masks this flag out */
1720     flags &= ~BDRV_O_PROTOCOL;
1721 
1722     if (bs->file) {
1723         bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
1724     }
1725 
1726     bs_entry = g_new0(BlockReopenQueueEntry, 1);
1727     QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1728 
1729     bs_entry->state.bs = bs;
1730     bs_entry->state.flags = flags;
1731 
1732     return bs_queue;
1733 }
1734 
1735 /*
1736  * Reopen multiple BlockDriverStates atomically & transactionally.
1737  *
1738  * The queue passed in (bs_queue) must have been built up previously
1739  * via bdrv_reopen_queue().
1740  *
1741  * Reopens all BDS specified in the queue, with the appropriate
1742  * flags.  All devices are prepared for reopen, and failure of any
1743  * device will cause all device changes to be abandoned, and intermediate
1744  * data cleaned up.
1745  *
1746  * If all devices prepare successfully, then the changes are committed
1747  * to all devices.
1748  *
1749  */
1750 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1751 {
1752     int ret = -1;
1753     BlockReopenQueueEntry *bs_entry, *next;
1754     Error *local_err = NULL;
1755 
1756     assert(bs_queue != NULL);
1757 
1758     bdrv_drain_all();
1759 
1760     QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1761         if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1762             error_propagate(errp, local_err);
1763             goto cleanup;
1764         }
1765         bs_entry->prepared = true;
1766     }
1767 
1768     /* If we reach this point, we have success and just need to apply the
1769      * changes
1770      */
1771     QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1772         bdrv_reopen_commit(&bs_entry->state);
1773     }
1774 
1775     ret = 0;
1776 
1777 cleanup:
1778     QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1779         if (ret && bs_entry->prepared) {
1780             bdrv_reopen_abort(&bs_entry->state);
1781         }
1782         g_free(bs_entry);
1783     }
1784     g_free(bs_queue);
1785     return ret;
1786 }
1787 
1788 
1789 /* Reopen a single BlockDriverState with the specified flags. */
1790 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1791 {
1792     int ret = -1;
1793     Error *local_err = NULL;
1794     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1795 
1796     ret = bdrv_reopen_multiple(queue, &local_err);
1797     if (local_err != NULL) {
1798         error_propagate(errp, local_err);
1799     }
1800     return ret;
1801 }
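
/*
 * Editor's illustrative sketch (not part of the original file): batching two
 * nodes into one atomic reopen with the queue API above.  'bs_a' and 'bs_b'
 * are hypothetical BlockDriverState pointers; if either prepare fails, both
 * reopens are rolled back together.
 */
static int example_reopen_pair_rdwr(BlockDriverState *bs_a,
                                    BlockDriverState *bs_b, Error **errp)
{
    BlockReopenQueue *queue;

    queue = bdrv_reopen_queue(NULL, bs_a, bs_a->open_flags | BDRV_O_RDWR);
    queue = bdrv_reopen_queue(queue, bs_b, bs_b->open_flags | BDRV_O_RDWR);

    /* bdrv_reopen_multiple() frees the queue before returning */
    return bdrv_reopen_multiple(queue, errp);
}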
1802 
1803 
1804 /*
1805  * Prepares a BlockDriverState for reopen. All changes are staged in the
1806  * 'opaque' field of the BDRVReopenState, which is used and allocated by
1807  * the block driver's .bdrv_reopen_prepare() callback.
1808  *
1809  * bs is the BlockDriverState to reopen
1810  * flags are the new open flags
1811  * queue is the reopen queue
1812  *
1813  * Returns 0 on success, non-zero on error.  On error errp will be set
1814  * as well.
1815  *
1816  * On failure, bdrv_reopen_abort() will be called to clean up any data.
1817  * It is then the responsibility of the caller to call abort() or
1818  * commit() for any other BDS that have been left in a prepare() state.
1819  *
1820  */
1821 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1822                         Error **errp)
1823 {
1824     int ret = -1;
1825     Error *local_err = NULL;
1826     BlockDriver *drv;
1827 
1828     assert(reopen_state != NULL);
1829     assert(reopen_state->bs->drv != NULL);
1830     drv = reopen_state->bs->drv;
1831 
1832     /* if we are to stay read-only, do not allow permission change
1833      * to r/w */
1834     if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1835         reopen_state->flags & BDRV_O_RDWR) {
1836         error_setg(errp, "Node '%s' is read only",
1837                    bdrv_get_device_or_node_name(reopen_state->bs));
1838         goto error;
1839     }
1840 
1841 
1842     ret = bdrv_flush(reopen_state->bs);
1843     if (ret) {
1844         error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1845                   strerror(-ret));
1846         goto error;
1847     }
1848 
1849     if (drv->bdrv_reopen_prepare) {
1850         ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1851         if (ret) {
1852             if (local_err != NULL) {
1853                 error_propagate(errp, local_err);
1854             } else {
1855                 error_setg(errp, "failed while preparing to reopen image '%s'",
1856                            reopen_state->bs->filename);
1857             }
1858             goto error;
1859         }
1860     } else {
1861         /* It is currently mandatory to have a bdrv_reopen_prepare()
1862          * handler for each supported driver. */
1863         error_setg(errp, "Block format '%s' used by node '%s' "
1864                    "does not support reopening files", drv->format_name,
1865                    bdrv_get_device_or_node_name(reopen_state->bs));
1866         ret = -1;
1867         goto error;
1868     }
1869 
1870     ret = 0;
1871 
1872 error:
1873     return ret;
1874 }
1875 
1876 /*
1877  * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1878  * makes them final by swapping the staging BlockDriverState contents into
1879  * the active BlockDriverState contents.
1880  */
1881 void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1882 {
1883     BlockDriver *drv;
1884 
1885     assert(reopen_state != NULL);
1886     drv = reopen_state->bs->drv;
1887     assert(drv != NULL);
1888 
1889     /* If there are any driver level actions to take */
1890     if (drv->bdrv_reopen_commit) {
1891         drv->bdrv_reopen_commit(reopen_state);
1892     }
1893 
1894     /* set BDS specific flags now */
1895     reopen_state->bs->open_flags         = reopen_state->flags;
1896     reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1897                                               BDRV_O_CACHE_WB);
1898     reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1899 
1900     bdrv_refresh_limits(reopen_state->bs, NULL);
1901 }
1902 
1903 /*
1904  * Abort the reopen, and delete and free the staged changes in
1905  * reopen_state
1906  */
1907 void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1908 {
1909     BlockDriver *drv;
1910 
1911     assert(reopen_state != NULL);
1912     drv = reopen_state->bs->drv;
1913     assert(drv != NULL);
1914 
1915     if (drv->bdrv_reopen_abort) {
1916         drv->bdrv_reopen_abort(reopen_state);
1917     }
1918 }
1919 
1920 
1921 void bdrv_close(BlockDriverState *bs)
1922 {
1923     BdrvAioNotifier *ban, *ban_next;
1924 
1925     if (bs->job) {
1926         block_job_cancel_sync(bs->job);
1927     }
1928     bdrv_drain_all(); /* complete I/O */
1929     bdrv_flush(bs);
1930     bdrv_drain_all(); /* in case flush left pending I/O */
1931     notifier_list_notify(&bs->close_notifiers, bs);
1932 
1933     if (bs->drv) {
1934         if (bs->backing_hd) {
1935             BlockDriverState *backing_hd = bs->backing_hd;
1936             bdrv_set_backing_hd(bs, NULL);
1937             bdrv_unref(backing_hd);
1938         }
1939         bs->drv->bdrv_close(bs);
1940         g_free(bs->opaque);
1941         bs->opaque = NULL;
1942         bs->drv = NULL;
1943         bs->copy_on_read = 0;
1944         bs->backing_file[0] = '\0';
1945         bs->backing_format[0] = '\0';
1946         bs->total_sectors = 0;
1947         bs->encrypted = 0;
1948         bs->valid_key = 0;
1949         bs->sg = 0;
1950         bs->zero_beyond_eof = false;
1951         QDECREF(bs->options);
1952         bs->options = NULL;
1953         QDECREF(bs->full_open_options);
1954         bs->full_open_options = NULL;
1955 
1956         if (bs->file != NULL) {
1957             bdrv_unref(bs->file);
1958             bs->file = NULL;
1959         }
1960     }
1961 
1962     if (bs->blk) {
1963         blk_dev_change_media_cb(bs->blk, false);
1964     }
1965 
1966     /* throttling disk I/O limits */
1967     if (bs->io_limits_enabled) {
1968         bdrv_io_limits_disable(bs);
1969     }
1970 
1971     QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
1972         g_free(ban);
1973     }
1974     QLIST_INIT(&bs->aio_notifiers);
1975 }
1976 
1977 void bdrv_close_all(void)
1978 {
1979     BlockDriverState *bs;
1980 
1981     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1982         AioContext *aio_context = bdrv_get_aio_context(bs);
1983 
1984         aio_context_acquire(aio_context);
1985         bdrv_close(bs);
1986         aio_context_release(aio_context);
1987     }
1988 }
1989 
1990 /* Check if any requests are in-flight (including throttled requests) */
1991 static bool bdrv_requests_pending(BlockDriverState *bs)
1992 {
1993     if (!QLIST_EMPTY(&bs->tracked_requests)) {
1994         return true;
1995     }
1996     if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1997         return true;
1998     }
1999     if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
2000         return true;
2001     }
2002     if (bs->file && bdrv_requests_pending(bs->file)) {
2003         return true;
2004     }
2005     if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
2006         return true;
2007     }
2008     return false;
2009 }
2010 
2011 static bool bdrv_drain_one(BlockDriverState *bs)
2012 {
2013     bool bs_busy;
2014 
2015     bdrv_flush_io_queue(bs);
2016     bdrv_start_throttled_reqs(bs);
2017     bs_busy = bdrv_requests_pending(bs);
2018     bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
2019     return bs_busy;
2020 }
2021 
2022 /*
2023  * Wait for pending requests to complete on a single BlockDriverState subtree
2024  *
2025  * See the warning in bdrv_drain_all().  This function can only be called if
2026  * you are sure nothing can generate I/O because you have op blockers
2027  * installed.
2028  *
2029  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
2030  * AioContext.
2031  */
2032 void bdrv_drain(BlockDriverState *bs)
2033 {
2034     while (bdrv_drain_one(bs)) {
2035         /* Keep iterating */
2036     }
2037 }
2038 
2039 /*
2040  * Wait for pending requests to complete across all BlockDriverStates
2041  *
2042  * This function does not flush data to disk, use bdrv_flush_all() for that
2043  * after calling this function.
2044  *
2045  * Note that completion of an asynchronous I/O operation can trigger any
2046  * number of other I/O operations on other devices---for example a coroutine
2047  * can be arbitrarily complex and a constant flow of I/O can come until the
2048  * coroutine is complete.  Because of this, it is not generally possible to
2049  * drain a single device's I/O queue in isolation (but see bdrv_drain()).
2050  */
2051 void bdrv_drain_all(void)
2052 {
2053     /* Always run first iteration so any pending completion BHs run */
2054     bool busy = true;
2055     BlockDriverState *bs = NULL;
2056 
2057     while ((bs = bdrv_next(bs))) {
2058         AioContext *aio_context = bdrv_get_aio_context(bs);
2059 
2060         aio_context_acquire(aio_context);
2061         if (bs->job) {
2062             block_job_pause(bs->job);
2063         }
2064         aio_context_release(aio_context);
2065     }
2066 
2067     while (busy) {
2068         busy = false;
2069         bs = NULL;
2070 
2071         while ((bs = bdrv_next(bs))) {
2072             AioContext *aio_context = bdrv_get_aio_context(bs);
2073 
2074             aio_context_acquire(aio_context);
2075             busy |= bdrv_drain_one(bs);
2076             aio_context_release(aio_context);
2077         }
2078     }
2079 
2080     bs = NULL;
2081     while ((bs = bdrv_next(bs))) {
2082         AioContext *aio_context = bdrv_get_aio_context(bs);
2083 
2084         aio_context_acquire(aio_context);
2085         if (bs->job) {
2086             block_job_resume(bs->job);
2087         }
2088         aio_context_release(aio_context);
2089     }
2090 }
2091 
2092 /* Make a BlockDriverState anonymous by removing it from the bdrv_states and
2093  * graph_bdrv_states lists.
2094  * Also, NUL-terminate the node_name to prevent a double remove. */
2095 void bdrv_make_anon(BlockDriverState *bs)
2096 {
2097     /*
2098      * Take care to remove bs from bdrv_states only when it's actually
2099      * in it.  Note that bs->device_list.tqe_prev is initially null,
2100      * and gets set to non-null by QTAILQ_INSERT_TAIL().  Establish
2101      * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by
2102      * resetting it to null on remove.
2103      */
2104     if (bs->device_list.tqe_prev) {
2105         QTAILQ_REMOVE(&bdrv_states, bs, device_list);
2106         bs->device_list.tqe_prev = NULL;
2107     }
2108     if (bs->node_name[0] != '\0') {
2109         QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
2110     }
2111     bs->node_name[0] = '\0';
2112 }
2113 
2114 static void bdrv_rebind(BlockDriverState *bs)
2115 {
2116     if (bs->drv && bs->drv->bdrv_rebind) {
2117         bs->drv->bdrv_rebind(bs);
2118     }
2119 }
2120 
2121 static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
2122                                      BlockDriverState *bs_src)
2123 {
2124     /* move some fields that need to stay attached to the device */
2125 
2126     /* dev info */
2127     bs_dest->guest_block_size   = bs_src->guest_block_size;
2128     bs_dest->copy_on_read       = bs_src->copy_on_read;
2129 
2130     bs_dest->enable_write_cache = bs_src->enable_write_cache;
2131 
2132     /* i/o throttled req */
2133     memcpy(&bs_dest->throttle_state,
2134            &bs_src->throttle_state,
2135            sizeof(ThrottleState));
2136     bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
2137     bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
2138     bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;
2139 
2140     /* r/w error */
2141     bs_dest->on_read_error      = bs_src->on_read_error;
2142     bs_dest->on_write_error     = bs_src->on_write_error;
2143 
2144     /* i/o status */
2145     bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
2146     bs_dest->iostatus           = bs_src->iostatus;
2147 
2148     /* dirty bitmap */
2149     bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;
2150 
2151     /* reference count */
2152     bs_dest->refcnt             = bs_src->refcnt;
2153 
2154     /* job */
2155     bs_dest->job                = bs_src->job;
2156 
2157     /* keep the same entry in bdrv_states */
2158     bs_dest->device_list = bs_src->device_list;
2159     bs_dest->blk = bs_src->blk;
2160 
2161     memcpy(bs_dest->op_blockers, bs_src->op_blockers,
2162            sizeof(bs_dest->op_blockers));
2163 }
2164 
2165 /*
2166  * Swap bs contents for two image chains while they are live,
2167  * keeping required fields on the BlockDriverState that is
2168  * actually attached to a device.
2169  *
2170  * This will modify the BlockDriverState fields, and swap contents
2171  * between bs_new and bs_old. Both bs_new and bs_old are modified.
2172  *
2173  * bs_new must not be attached to a BlockBackend.
2174  *
2175  * This function does not create any image files.
2176  */
2177 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2178 {
2179     BlockDriverState tmp;
2180 
2181     /* The code needs to swap the node_name, but simply swapping node_list
2182      * won't work, so first remove the nodes from the graph list, do the swap,
2183      * and then insert them back if needed.
2184      */
2185     if (bs_new->node_name[0] != '\0') {
2186         QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2187     }
2188     if (bs_old->node_name[0] != '\0') {
2189         QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2190     }
2191 
2192     /* bs_new must be unattached and shouldn't have anything fancy enabled */
2193     assert(!bs_new->blk);
2194     assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
2195     assert(bs_new->job == NULL);
2196     assert(bs_new->io_limits_enabled == false);
2197     assert(!throttle_have_timer(&bs_new->throttle_state));
2198 
2199     tmp = *bs_new;
2200     *bs_new = *bs_old;
2201     *bs_old = tmp;
2202 
2203     /* there are some fields that should not be swapped; move them back */
2204     bdrv_move_feature_fields(&tmp, bs_old);
2205     bdrv_move_feature_fields(bs_old, bs_new);
2206     bdrv_move_feature_fields(bs_new, &tmp);
2207 
2208     /* bs_new must remain unattached */
2209     assert(!bs_new->blk);
2210 
2211     /* Check a few fields that should remain attached to the device */
2212     assert(bs_new->job == NULL);
2213     assert(bs_new->io_limits_enabled == false);
2214     assert(!throttle_have_timer(&bs_new->throttle_state));
2215 
2216     /* insert the nodes back into the graph node list if needed */
2217     if (bs_new->node_name[0] != '\0') {
2218         QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2219     }
2220     if (bs_old->node_name[0] != '\0') {
2221         QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2222     }
2223 
2224     bdrv_rebind(bs_new);
2225     bdrv_rebind(bs_old);
2226 }
2227 
2228 /*
2229  * Add new bs contents at the top of an image chain while the chain is
2230  * live, while keeping required fields on the top layer.
2231  *
2232  * This will modify the BlockDriverState fields, and swap contents
2233  * between bs_new and bs_top. Both bs_new and bs_top are modified.
2234  *
2235  * bs_new must not be attached to a BlockBackend.
2236  *
2237  * This function does not create any image files.
2238  */
2239 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2240 {
2241     bdrv_swap(bs_new, bs_top);
2242 
2243     /* The contents of 'tmp' will become bs_top, as we are
2244      * swapping bs_new and bs_top contents. */
2245     bdrv_set_backing_hd(bs_top, bs_new);
2246 }
2247 
2248 static void bdrv_delete(BlockDriverState *bs)
2249 {
2250     assert(!bs->job);
2251     assert(bdrv_op_blocker_is_empty(bs));
2252     assert(!bs->refcnt);
2253     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2254 
2255     bdrv_close(bs);
2256 
2257     /* remove from list, if necessary */
2258     bdrv_make_anon(bs);
2259 
2260     g_free(bs);
2261 }
2262 
2263 /*
2264  * Run consistency checks on an image
2265  *
2266  * Returns 0 if the check could be completed (it doesn't mean that the image is
2267  * free of errors) or -errno when an internal error occurred. The results of the
2268  * check are stored in res.
2269  */
2270 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2271 {
2272     if (bs->drv == NULL) {
2273         return -ENOMEDIUM;
2274     }
2275     if (bs->drv->bdrv_check == NULL) {
2276         return -ENOTSUP;
2277     }
2278 
2279     memset(res, 0, sizeof(*res));
2280     return bs->drv->bdrv_check(bs, res, fix);
2281 }
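
/*
 * Editor's illustrative sketch (not part of the original file): a read-only
 * consistency check.  Assumes BdrvCheckResult exposes 'corruptions' and
 * 'leaks' counters; passing 0 as the fix mode only reports problems.
 */
static int example_check_readonly(BlockDriverState *bs)
{
    BdrvCheckResult res;
    int ret = bdrv_check(bs, &res, 0);

    if (ret < 0) {
        return ret; /* the check itself could not be completed */
    }
    /* ret == 0 only means the check ran; the counters hold the verdict */
    return (res.corruptions || res.leaks) ? -EIO : 0;
}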
2282 
2283 #define COMMIT_BUF_SECTORS 2048
2284 
2285 /* commit the COW image into its backing file */
2286 int bdrv_commit(BlockDriverState *bs)
2287 {
2288     BlockDriver *drv = bs->drv;
2289     int64_t sector, total_sectors, length, backing_length;
2290     int n, ro, open_flags;
2291     int ret = 0;
2292     uint8_t *buf = NULL;
2293 
2294     if (!drv) {
2295         return -ENOMEDIUM;
         }
2296 
2297     if (!bs->backing_hd) {
2298         return -ENOTSUP;
2299     }
2300 
2301     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
2302         bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
2303         return -EBUSY;
2304     }
2305 
2306     ro = bs->backing_hd->read_only;
2307     open_flags = bs->backing_hd->open_flags;
2308 
2309     if (ro) {
2310         if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2311             return -EACCES;
2312         }
2313     }
2314 
2315     length = bdrv_getlength(bs);
2316     if (length < 0) {
2317         ret = length;
2318         goto ro_cleanup;
2319     }
2320 
2321     backing_length = bdrv_getlength(bs->backing_hd);
2322     if (backing_length < 0) {
2323         ret = backing_length;
2324         goto ro_cleanup;
2325     }
2326 
2327     /* If our top snapshot is larger than the backing file image,
2328      * grow the backing file image if possible.  If not possible,
2329      * we must return an error */
2330     if (length > backing_length) {
2331         ret = bdrv_truncate(bs->backing_hd, length);
2332         if (ret < 0) {
2333             goto ro_cleanup;
2334         }
2335     }
2336 
2337     total_sectors = length >> BDRV_SECTOR_BITS;
2338 
2339     /* qemu_try_blockalign() for bs will choose an alignment that works for
2340      * bs->backing_hd as well, so no need to compare the alignment manually. */
2341     buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2342     if (buf == NULL) {
2343         ret = -ENOMEM;
2344         goto ro_cleanup;
2345     }
2346 
2347     for (sector = 0; sector < total_sectors; sector += n) {
2348         ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2349         if (ret < 0) {
2350             goto ro_cleanup;
2351         }
2352         if (ret) {
2353             ret = bdrv_read(bs, sector, buf, n);
2354             if (ret < 0) {
2355                 goto ro_cleanup;
2356             }
2357 
2358             ret = bdrv_write(bs->backing_hd, sector, buf, n);
2359             if (ret < 0) {
2360                 goto ro_cleanup;
2361             }
2362         }
2363     }
2364 
2365     if (drv->bdrv_make_empty) {
2366         ret = drv->bdrv_make_empty(bs);
2367         if (ret < 0) {
2368             goto ro_cleanup;
2369         }
2370         bdrv_flush(bs);
2371     }
2372 
2373     /*
2374      * Make sure all data we wrote to the backing device is actually
2375      * stable on disk.
2376      */
2377     if (bs->backing_hd) {
2378         bdrv_flush(bs->backing_hd);
2379     }
2380 
2381     ret = 0;
2382 ro_cleanup:
2383     qemu_vfree(buf);
2384 
2385     if (ro) {
2386         /* ignoring error return here */
2387         bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2388     }
2389 
2390     return ret;
2391 }
2392 
2393 int bdrv_commit_all(void)
2394 {
2395     BlockDriverState *bs;
2396 
2397     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2398         AioContext *aio_context = bdrv_get_aio_context(bs);
2399 
2400         aio_context_acquire(aio_context);
2401         if (bs->drv && bs->backing_hd) {
2402             int ret = bdrv_commit(bs);
2403             if (ret < 0) {
2404                 aio_context_release(aio_context);
2405                 return ret;
2406             }
2407         }
2408         aio_context_release(aio_context);
2409     }
2410     return 0;
2411 }
2412 
2413 /**
2414  * Remove an active request from the tracked requests list
2415  *
2416  * This function should be called when a tracked request is completing.
2417  */
2418 static void tracked_request_end(BdrvTrackedRequest *req)
2419 {
2420     if (req->serialising) {
2421         req->bs->serialising_in_flight--;
2422     }
2423 
2424     QLIST_REMOVE(req, list);
2425     qemu_co_queue_restart_all(&req->wait_queue);
2426 }
2427 
2428 /**
2429  * Add an active request to the tracked requests list
2430  */
2431 static void tracked_request_begin(BdrvTrackedRequest *req,
2432                                   BlockDriverState *bs,
2433                                   int64_t offset,
2434                                   unsigned int bytes, bool is_write)
2435 {
2436     *req = (BdrvTrackedRequest){
2437         .bs = bs,
2438         .offset         = offset,
2439         .bytes          = bytes,
2440         .is_write       = is_write,
2441         .co             = qemu_coroutine_self(),
2442         .serialising    = false,
2443         .overlap_offset = offset,
2444         .overlap_bytes  = bytes,
2445     };
2446 
2447     qemu_co_queue_init(&req->wait_queue);
2448 
2449     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2450 }
2451 
2452 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2453 {
2454     int64_t overlap_offset = req->offset & ~(align - 1);
2455     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2456                                - overlap_offset;
2457 
2458     if (!req->serialising) {
2459         req->bs->serialising_in_flight++;
2460         req->serialising = true;
2461     }
2462 
2463     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2464     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2465 }
2466 
2467 /**
2468  * Round a region to cluster boundaries
2469  */
2470 void bdrv_round_to_clusters(BlockDriverState *bs,
2471                             int64_t sector_num, int nb_sectors,
2472                             int64_t *cluster_sector_num,
2473                             int *cluster_nb_sectors)
2474 {
2475     BlockDriverInfo bdi;
2476 
2477     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2478         *cluster_sector_num = sector_num;
2479         *cluster_nb_sectors = nb_sectors;
2480     } else {
2481         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2482         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2483         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2484                                             nb_sectors, c);
2485     }
2486 }
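
/*
 * Editor's note (worked example, not part of the original file): with a
 * 64 KiB cluster size, c = 65536 / 512 = 128 sectors.  A request for sectors
 * [130, 140) is widened to *cluster_sector_num = 128 and *cluster_nb_sectors
 * = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128, i.e. the whole cluster
 * [128, 256).
 */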
2487 
2488 static int bdrv_get_cluster_size(BlockDriverState *bs)
2489 {
2490     BlockDriverInfo bdi;
2491     int ret;
2492 
2493     ret = bdrv_get_info(bs, &bdi);
2494     if (ret < 0 || bdi.cluster_size == 0) {
2495         return bs->request_alignment;
2496     } else {
2497         return bdi.cluster_size;
2498     }
2499 }
2500 
2501 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2502                                      int64_t offset, unsigned int bytes)
2503 {
2504     /*        aaaa   bbbb */
2505     if (offset >= req->overlap_offset + req->overlap_bytes) {
2506         return false;
2507     }
2508     /* bbbb   aaaa        */
2509     if (req->overlap_offset >= offset + bytes) {
2510         return false;
2511     }
2512     return true;
2513 }
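
/*
 * Editor's note (worked example, not part of the original file): a request
 * whose overlap region is [4096, 8192) does not conflict with one starting
 * at offset 8192, but does conflict with one covering [8191, 8192): the
 * half-open intervals overlap only if each starts before the other ends.
 */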
2514 
2515 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2516 {
2517     BlockDriverState *bs = self->bs;
2518     BdrvTrackedRequest *req;
2519     bool retry;
2520     bool waited = false;
2521 
2522     if (!bs->serialising_in_flight) {
2523         return false;
2524     }
2525 
2526     do {
2527         retry = false;
2528         QLIST_FOREACH(req, &bs->tracked_requests, list) {
2529             if (req == self || (!req->serialising && !self->serialising)) {
2530                 continue;
2531             }
2532             if (tracked_request_overlaps(req, self->overlap_offset,
2533                                          self->overlap_bytes))
2534             {
2535                 /* Hitting this means there was a reentrant request, for
2536                  * example, a block driver issuing nested requests.  This must
2537                  * never happen since it means deadlock.
2538                  */
2539                 assert(qemu_coroutine_self() != req->co);
2540 
2541                 /* If the request is already (indirectly) waiting for us, or
2542                  * will wait for us as soon as it wakes up, then just go on
2543                  * (instead of producing a deadlock in the former case). */
2544                 if (!req->waiting_for) {
2545                     self->waiting_for = req;
2546                     qemu_co_queue_wait(&req->wait_queue);
2547                     self->waiting_for = NULL;
2548                     retry = true;
2549                     waited = true;
2550                     break;
2551                 }
2552             }
2553         }
2554     } while (retry);
2555 
2556     return waited;
2557 }
2558 
2559 /*
2560  * Return values:
2561  * 0        - success
2562  * -EINVAL  - backing format specified, but no file
2563  * -ENOSPC  - can't update the backing file because no space is left in the
2564  *            image file header
2565  * -ENOTSUP - format driver doesn't support changing the backing file
2566  */
2567 int bdrv_change_backing_file(BlockDriverState *bs,
2568     const char *backing_file, const char *backing_fmt)
2569 {
2570     BlockDriver *drv = bs->drv;
2571     int ret;
2572 
2573     /* Backing file format doesn't make sense without a backing file */
2574     if (backing_fmt && !backing_file) {
2575         return -EINVAL;
2576     }
2577 
2578     if (drv->bdrv_change_backing_file != NULL) {
2579         ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2580     } else {
2581         ret = -ENOTSUP;
2582     }
2583 
2584     if (ret == 0) {
2585         pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2586         pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2587     }
2588     return ret;
2589 }
2590 
2591 /*
2592  * Finds the image layer in the chain that has 'bs' as its backing file.
2593  *
2594  * active is the current topmost image.
2595  *
2596  * Returns NULL if bs is not found in active's image chain,
2597  * or if active == bs.
2598  *
2599  * Returns the bottommost base image if bs == NULL.
2600  */
2601 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2602                                     BlockDriverState *bs)
2603 {
2604     while (active && bs != active->backing_hd) {
2605         active = active->backing_hd;
2606     }
2607 
2608     return active;
2609 }
2610 
2611 /* Given a BDS, searches for the base layer. */
2612 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
2613 {
2614     return bdrv_find_overlay(bs, NULL);
2615 }
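
/*
 * Editor's note (worked example, not part of the original file): for the
 * chain base <- mid <- active, bdrv_find_overlay(active, base) returns 'mid',
 * bdrv_find_overlay(active, NULL) (i.e. bdrv_find_base(active)) returns
 * 'base', and bdrv_find_overlay(active, active) returns NULL.
 */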
2616 
2617 typedef struct BlkIntermediateStates {
2618     BlockDriverState *bs;
2619     QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2620 } BlkIntermediateStates;
2621 
2622 
2623 /*
2624  * Drops images above 'base' up to and including 'top', and sets the image
2625  * above 'top' to have base as its backing file.
2626  *
2627  * Requires that the overlay to 'top' is opened r/w, so that the backing file
2628  * information in the overlay can be properly updated.
2629  *
2630  * E.g., this will convert the following chain:
2631  * bottom <- base <- intermediate <- top <- active
2632  *
2633  * to
2634  *
2635  * bottom <- base <- active
2636  *
2637  * It is allowed for bottom==base, in which case it converts:
2638  *
2639  * base <- intermediate <- top <- active
2640  *
2641  * to
2642  *
2643  * base <- active
2644  *
2645  * If backing_file_str is non-NULL, it will be used when modifying top's
2646  * overlay image metadata.
2647  *
2648  * Error conditions:
2649  *  if active == top, that is considered an error
2650  *
2651  */
2652 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2653                            BlockDriverState *base, const char *backing_file_str)
2654 {
2655     BlockDriverState *intermediate;
2656     BlockDriverState *base_bs = NULL;
2657     BlockDriverState *new_top_bs = NULL;
2658     BlkIntermediateStates *intermediate_state, *next;
2659     int ret = -EIO;
2660 
2661     QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2662     QSIMPLEQ_INIT(&states_to_delete);
2663 
2664     if (!top->drv || !base->drv) {
2665         goto exit;
2666     }
2667 
2668     new_top_bs = bdrv_find_overlay(active, top);
2669 
2670     if (new_top_bs == NULL) {
2671         /* we could not find the image above 'top', this is an error */
2672         goto exit;
2673     }
2674 
2675     /* special case of new_top_bs->backing_hd already pointing to base - nothing
2676      * to do, no intermediate images */
2677     if (new_top_bs->backing_hd == base) {
2678         ret = 0;
2679         goto exit;
2680     }
2681 
2682     intermediate = top;
2683 
2684     /* now we will go down through the list, and add each BDS we find
2685      * into our deletion queue, until we hit the 'base'
2686      */
2687     while (intermediate) {
2688         intermediate_state = g_new0(BlkIntermediateStates, 1);
2689         intermediate_state->bs = intermediate;
2690         QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2691 
2692         if (intermediate->backing_hd == base) {
2693             base_bs = intermediate->backing_hd;
2694             break;
2695         }
2696         intermediate = intermediate->backing_hd;
2697     }
2698     if (base_bs == NULL) {
2699         /* Something went wrong: we did not end at the base.  Safely
2700          * unravel everything and exit with an error */
2701         goto exit;
2702     }
2703 
2704     /* success - we can delete the intermediate states, and link top->base */
2705     backing_file_str = backing_file_str ? backing_file_str : base_bs->filename;
2706     ret = bdrv_change_backing_file(new_top_bs, backing_file_str,
2707                                    base_bs->drv ? base_bs->drv->format_name : "");
2708     if (ret) {
2709         goto exit;
2710     }
2711     bdrv_set_backing_hd(new_top_bs, base_bs);
2712 
2713     QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2714         /* so that bdrv_close() does not recursively close the chain */
2715         bdrv_set_backing_hd(intermediate_state->bs, NULL);
2716         bdrv_unref(intermediate_state->bs);
2717     }
2718     ret = 0;
2719 
2720 exit:
2721     QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2722         g_free(intermediate_state);
2723     }
2724     return ret;
2725 }
2726 
2727 
2728 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2729                                    size_t size)
2730 {
2731     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
2732         return -EIO;
2733     }
2734 
2735     if (!bdrv_is_inserted(bs)) {
2736         return -ENOMEDIUM;
2737     }
2738 
2739     if (offset < 0) {
2740         return -EIO;
2741     }
2742 
2743     return 0;
2744 }
2745 
2746 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2747                               int nb_sectors)
2748 {
2749     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
2750         return -EIO;
2751     }
2752 
2753     return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2754                                    nb_sectors * BDRV_SECTOR_SIZE);
2755 }
2756 
2757 typedef struct RwCo {
2758     BlockDriverState *bs;
2759     int64_t offset;
2760     QEMUIOVector *qiov;
2761     bool is_write;
2762     int ret;
2763     BdrvRequestFlags flags;
2764 } RwCo;
2765 
2766 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2767 {
2768     RwCo *rwco = opaque;
2769 
2770     if (!rwco->is_write) {
2771         rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2772                                       rwco->qiov->size, rwco->qiov,
2773                                       rwco->flags);
2774     } else {
2775         rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2776                                        rwco->qiov->size, rwco->qiov,
2777                                        rwco->flags);
2778     }
2779 }
2780 
2781 /*
2782  * Process a vectored synchronous request using coroutines
2783  */
2784 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2785                         QEMUIOVector *qiov, bool is_write,
2786                         BdrvRequestFlags flags)
2787 {
2788     Coroutine *co;
2789     RwCo rwco = {
2790         .bs = bs,
2791         .offset = offset,
2792         .qiov = qiov,
2793         .is_write = is_write,
2794         .ret = NOT_DONE,
2795         .flags = flags,
2796     };
2797 
2798     /**
2799      * In sync call context, when the vcpu is blocked, this throttling timer
2800      * will not fire; so the I/O throttling function has to be disabled here
2801      * if it has been enabled.
2802      */
2803     if (bs->io_limits_enabled) {
2804         fprintf(stderr, "Disabling I/O throttling on '%s' due "
2805                         "to synchronous I/O.\n", bdrv_get_device_name(bs));
2806         bdrv_io_limits_disable(bs);
2807     }
2808 
2809     if (qemu_in_coroutine()) {
2810         /* Fast-path if already in coroutine context */
2811         bdrv_rw_co_entry(&rwco);
2812     } else {
2813         AioContext *aio_context = bdrv_get_aio_context(bs);
2814 
2815         co = qemu_coroutine_create(bdrv_rw_co_entry);
2816         qemu_coroutine_enter(co, &rwco);
2817         while (rwco.ret == NOT_DONE) {
2818             aio_poll(aio_context, true);
2819         }
2820     }
2821     return rwco.ret;
2822 }
2823 
2824 /*
2825  * Process a synchronous request using coroutines
2826  */
2827 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2828                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
2829 {
2830     QEMUIOVector qiov;
2831     struct iovec iov = {
2832         .iov_base = (void *)buf,
2833         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2834     };
2835 
2836     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
2837         return -EINVAL;
2838     }
2839 
2840     qemu_iovec_init_external(&qiov, &iov, 1);
2841     return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2842                         &qiov, is_write, flags);
2843 }
2844 
2845 /* return < 0 if error. See bdrv_write() for the return codes */
2846 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2847               uint8_t *buf, int nb_sectors)
2848 {
2849     return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2850 }
2851 
2852 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2853 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2854                           uint8_t *buf, int nb_sectors)
2855 {
2856     bool enabled;
2857     int ret;
2858 
2859     enabled = bs->io_limits_enabled;
2860     bs->io_limits_enabled = false;
2861     ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2862     bs->io_limits_enabled = enabled;
2863     return ret;
2864 }
2865 
2866 /* Return < 0 if error. Important errors are:
2867   -EIO         generic I/O error (may happen for all errors)
2868   -ENOMEDIUM   No media inserted.
2869   -EINVAL      Invalid sector number or nb_sectors
2870   -EACCES      Trying to write a read-only device
2871 */
2872 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2873                const uint8_t *buf, int nb_sectors)
2874 {
2875     return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2876 }
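
/*
 * Editor's illustrative sketch (not part of the original file): a synchronous
 * read-modify-write of one sector using bdrv_read()/bdrv_write() above.
 * 'byte_in_sector' and 'value' are hypothetical parameters.
 */
static int example_patch_sector(BlockDriverState *bs, int64_t sector_num,
                                size_t byte_in_sector, uint8_t value)
{
    uint8_t *buf = qemu_blockalign(bs, BDRV_SECTOR_SIZE);
    int ret;

    ret = bdrv_read(bs, sector_num, buf, 1);
    if (ret >= 0) {
        buf[byte_in_sector] = value;
        ret = bdrv_write(bs, sector_num, buf, 1);
    }
    qemu_vfree(buf);
    return ret;
}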
2877 
2878 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2879                       int nb_sectors, BdrvRequestFlags flags)
2880 {
2881     return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2882                       BDRV_REQ_ZERO_WRITE | flags);
2883 }
2884 
2885 /*
2886  * Completely zero out a block device with the help of bdrv_write_zeroes.
2887  * The operation is sped up by checking the block status and only writing
2888  * zeroes to ranges that do not already read back as zeroes. Optional
2889  * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2890  *
2891  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2892  */
2893 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2894 {
2895     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
2896     int n;
2897 
2898     target_sectors = bdrv_nb_sectors(bs);
2899     if (target_sectors < 0) {
2900         return target_sectors;
2901     }
2902 
2903     for (;;) {
2904         nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
2905         if (nb_sectors <= 0) {
2906             return 0;
2907         }
2908         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2909         if (ret < 0) {
2910             error_report("error getting block status at sector %" PRId64 ": %s",
2911                          sector_num, strerror(-ret));
2912             return ret;
2913         }
2914         if (ret & BDRV_BLOCK_ZERO) {
2915             sector_num += n;
2916             continue;
2917         }
2918         ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2919         if (ret < 0) {
2920             error_report("error writing zeroes at sector %" PRId64 ": %s",
2921                          sector_num, strerror(-ret));
2922             return ret;
2923         }
2924         sector_num += n;
2925     }
2926 }
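
/*
 * Editor's illustrative sketch (not part of the original file): wiping an
 * entire image while letting the driver unmap ranges instead of writing
 * literal zeroes.
 */
static int example_wipe_image(BlockDriverState *bs)
{
    return bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
}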
2927 
2928 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2929 {
2930     QEMUIOVector qiov;
2931     struct iovec iov = {
2932         .iov_base = (void *)buf,
2933         .iov_len = bytes,
2934     };
2935     int ret;
2936 
2937     if (bytes < 0) {
2938         return -EINVAL;
2939     }
2940 
2941     qemu_iovec_init_external(&qiov, &iov, 1);
2942     ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2943     if (ret < 0) {
2944         return ret;
2945     }
2946 
2947     return bytes;
2948 }
2949 
2950 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2951 {
2952     int ret;
2953 
2954     ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2955     if (ret < 0) {
2956         return ret;
2957     }
2958 
2959     return qiov->size;
2960 }
2961 
2962 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2963                 const void *buf, int bytes)
2964 {
2965     QEMUIOVector qiov;
2966     struct iovec iov = {
2967         .iov_base   = (void *) buf,
2968         .iov_len    = bytes,
2969     };
2970 
2971     if (bytes < 0) {
2972         return -EINVAL;
2973     }
2974 
2975     qemu_iovec_init_external(&qiov, &iov, 1);
2976     return bdrv_pwritev(bs, offset, &qiov);
2977 }
2978 
2979 /*
2980  * Writes to the file and ensures that no writes are reordered across this
2981  * request (acts as a barrier)
2982  *
2983  * Returns 0 on success, -errno in error cases.
2984  */
2985 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2986     const void *buf, int count)
2987 {
2988     int ret;
2989 
2990     ret = bdrv_pwrite(bs, offset, buf, count);
2991     if (ret < 0) {
2992         return ret;
2993     }
2994 
2995     /* No flush needed for cache modes that already do it */
2996     if (bs->enable_write_cache) {
2997         bdrv_flush(bs);
2998     }
2999 
3000     return 0;
3001 }
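
/*
 * Editor's illustrative sketch (not part of the original file): using the
 * barrier semantics of bdrv_pwrite_sync() to order two dependent metadata
 * updates.  The offsets and the header/entry layout are hypothetical.
 */
static int example_ordered_update(BlockDriverState *bs,
                                  const void *header, int header_len,
                                  const void *entry, int entry_len)
{
    int ret;

    /* the entry must be stable on disk before the header points at it */
    ret = bdrv_pwrite_sync(bs, 4096, entry, entry_len);
    if (ret < 0) {
        return ret;
    }
    return bdrv_pwrite_sync(bs, 0, header, header_len);
}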
3002 
3003 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
3004         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3005 {
3006     /* Perform I/O through a temporary buffer so that users who scribble over
3007      * their read buffer while the operation is in progress do not end up
3008      * modifying the image file.  This is critical for zero-copy guest I/O
3009      * where anything might happen inside guest memory.
3010      */
3011     void *bounce_buffer;
3012 
3013     BlockDriver *drv = bs->drv;
3014     struct iovec iov;
3015     QEMUIOVector bounce_qiov;
3016     int64_t cluster_sector_num;
3017     int cluster_nb_sectors;
3018     size_t skip_bytes;
3019     int ret;
3020 
3021     /* Cover the entire cluster so that no additional backing file I/O is
3022      * required when allocating a cluster in the image file.
3023      */
3024     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
3025                            &cluster_sector_num, &cluster_nb_sectors);
3026 
3027     trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
3028                                    cluster_sector_num, cluster_nb_sectors);
3029 
3030     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
3031     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
3032     if (bounce_buffer == NULL) {
3033         ret = -ENOMEM;
3034         goto err;
3035     }
3036 
3037     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
3038 
3039     ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
3040                              &bounce_qiov);
3041     if (ret < 0) {
3042         goto err;
3043     }
3044 
3045     if (drv->bdrv_co_write_zeroes &&
3046         buffer_is_zero(bounce_buffer, iov.iov_len)) {
3047         ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
3048                                       cluster_nb_sectors, 0);
3049     } else {
3050         /* This does not change the data on the disk, so it is not necessary
3051          * to flush even in cache=writethrough mode.
3052          */
3053         ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
3054                                   &bounce_qiov);
3055     }
3056 
3057     if (ret < 0) {
3058         /* It might be okay to ignore write errors for guest requests.  If this
3059          * is a deliberate copy-on-read then we don't want to ignore the error.
3060          * Simply report it in all cases.
3061          */
3062         goto err;
3063     }
3064 
3065     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
3066     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3067                         nb_sectors * BDRV_SECTOR_SIZE);
3068 
3069 err:
3070     qemu_vfree(bounce_buffer);
3071     return ret;
3072 }
3073 
3074 /*
3075  * Forwards an already correctly aligned request to the BlockDriver. This
3076  * handles copy on read and zeroing after EOF; any other features must be
3077  * implemented by the caller.
3078  */
3079 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3080     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3081     int64_t align, QEMUIOVector *qiov, int flags)
3082 {
3083     BlockDriver *drv = bs->drv;
3084     int ret;
3085 
3086     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3087     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3088 
3089     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3090     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3091     assert(!qiov || bytes == qiov->size);
3092 
3093     /* Handle Copy on Read and associated serialisation */
3094     if (flags & BDRV_REQ_COPY_ON_READ) {
3095         /* If we touch the same cluster it counts as an overlap.  This
3096          * guarantees that allocating writes will be serialized and not race
3097          * with each other for the same cluster.  For example, in copy-on-read
3098          * it ensures that the CoR read and write operations are atomic and
3099          * guest writes cannot interleave between them. */
3100         mark_request_serialising(req, bdrv_get_cluster_size(bs));
3101     }
3102 
3103     wait_serialising_requests(req);
3104 
3105     if (flags & BDRV_REQ_COPY_ON_READ) {
3106         int pnum;
3107 
3108         ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3109         if (ret < 0) {
3110             goto out;
3111         }
3112 
3113         if (!ret || pnum != nb_sectors) {
3114             ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3115             goto out;
3116         }
3117     }
3118 
3119     /* Forward the request to the BlockDriver */
3120     if (!bs->zero_beyond_eof) {
3121         ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3122     } else {
3123         /* Read zeroes after EOF */
3124         int64_t total_sectors, max_nb_sectors;
3125 
3126         total_sectors = bdrv_nb_sectors(bs);
3127         if (total_sectors < 0) {
3128             ret = total_sectors;
3129             goto out;
3130         }
3131 
3132         max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3133                                   align >> BDRV_SECTOR_BITS);
3134         if (nb_sectors < max_nb_sectors) {
3135             ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3136         } else if (max_nb_sectors > 0) {
3137             QEMUIOVector local_qiov;
3138 
3139             qemu_iovec_init(&local_qiov, qiov->niov);
3140             qemu_iovec_concat(&local_qiov, qiov, 0,
3141                               max_nb_sectors * BDRV_SECTOR_SIZE);
3142 
3143             ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
3144                                      &local_qiov);
3145 
3146             qemu_iovec_destroy(&local_qiov);
3147         } else {
3148             ret = 0;
3149         }
3150 
3151         /* Reading beyond end of file is supposed to produce zeroes */
3152         if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3153             uint64_t offset = MAX(0, total_sectors - sector_num);
3154             uint64_t bytes = (sector_num + nb_sectors - offset) *
3155                               BDRV_SECTOR_SIZE;
3156             qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3157         }
3158     }
3159 
3160 out:
3161     return ret;
3162 }
3163 
3164 static inline uint64_t bdrv_get_align(BlockDriverState *bs)
3165 {
3166     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3167     return MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3168 }
3169 
3170 static inline bool bdrv_req_is_aligned(BlockDriverState *bs,
3171                                        int64_t offset, size_t bytes)
3172 {
3173     int64_t align = bdrv_get_align(bs);
3174     return !(offset & (align - 1) || (bytes & (align - 1)));
3175 }
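
/*
 * Editor's note (worked example, not part of the original file): with a
 * 512-byte alignment, a request at offset 4096 for 1024 bytes is aligned,
 * while one at offset 4100, or one for 1000 bytes, is not and will be padded
 * by bdrv_co_do_preadv()/bdrv_co_do_pwritev() below.
 */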
3176 
3177 /*
3178  * Handle a read request in coroutine context
3179  */
3180 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3181     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3182     BdrvRequestFlags flags)
3183 {
3184     BlockDriver *drv = bs->drv;
3185     BdrvTrackedRequest req;
3186 
3187     uint64_t align = bdrv_get_align(bs);
3188     uint8_t *head_buf = NULL;
3189     uint8_t *tail_buf = NULL;
3190     QEMUIOVector local_qiov;
3191     bool use_local_qiov = false;
3192     int ret;
3193 
3194     if (!drv) {
3195         return -ENOMEDIUM;
3196     }
3197 
3198     ret = bdrv_check_byte_request(bs, offset, bytes);
3199     if (ret < 0) {
3200         return ret;
3201     }
3202 
3203     if (bs->copy_on_read) {
3204         flags |= BDRV_REQ_COPY_ON_READ;
3205     }
3206 
3207     /* throttling disk I/O */
3208     if (bs->io_limits_enabled) {
3209         bdrv_io_limits_intercept(bs, bytes, false);
3210     }
3211 
3212     /* Align read if necessary by padding qiov */
3213     if (offset & (align - 1)) {
3214         head_buf = qemu_blockalign(bs, align);
3215         qemu_iovec_init(&local_qiov, qiov->niov + 2);
3216         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3217         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3218         use_local_qiov = true;
3219 
3220         bytes += offset & (align - 1);
3221         offset = offset & ~(align - 1);
3222     }
3223 
3224     if ((offset + bytes) & (align - 1)) {
3225         if (!use_local_qiov) {
3226             qemu_iovec_init(&local_qiov, qiov->niov + 1);
3227             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3228             use_local_qiov = true;
3229         }
3230         tail_buf = qemu_blockalign(bs, align);
3231         qemu_iovec_add(&local_qiov, tail_buf,
3232                        align - ((offset + bytes) & (align - 1)));
3233 
3234         bytes = ROUND_UP(bytes, align);
3235     }
3236 
3237     tracked_request_begin(&req, bs, offset, bytes, false);
3238     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3239                               use_local_qiov ? &local_qiov : qiov,
3240                               flags);
3241     tracked_request_end(&req);
3242 
3243     if (use_local_qiov) {
3244         qemu_iovec_destroy(&local_qiov);
3245         qemu_vfree(head_buf);
3246         qemu_vfree(tail_buf);
3247     }
3248 
3249     return ret;
3250 }
3251 
3252 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3253     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3254     BdrvRequestFlags flags)
3255 {
3256     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
3257         return -EINVAL;
3258     }
3259 
3260     return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3261                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3262 }
3263 
3264 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3265     int nb_sectors, QEMUIOVector *qiov)
3266 {
3267     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3268 
3269     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3270 }
3271 
3272 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3273     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3274 {
3275     trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3276 
3277     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3278                             BDRV_REQ_COPY_ON_READ);
3279 }
3280 
3281 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
3282 
3283 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3284     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3285 {
3286     BlockDriver *drv = bs->drv;
3287     QEMUIOVector qiov;
3288     struct iovec iov = {0};
3289     int ret = 0;
3290 
3291     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
3292                                         BDRV_REQUEST_MAX_SECTORS);
3293 
3294     while (nb_sectors > 0 && !ret) {
3295         int num = nb_sectors;
3296 
3297         /* Align request.  Block drivers can expect the "bulk" of the request
3298          * to be aligned.
3299          */
3300         if (bs->bl.write_zeroes_alignment
3301             && num > bs->bl.write_zeroes_alignment) {
3302             if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3303                 /* Make a small request up to the first aligned sector.  */
3304                 num = bs->bl.write_zeroes_alignment;
3305                 num -= sector_num % bs->bl.write_zeroes_alignment;
3306             } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3307                 /* Shorten the request to the last aligned sector.  num cannot
3308                  * underflow because num > bs->bl.write_zeroes_alignment.
3309                  */
3310                 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3311             }
3312         }
3313 
3314         /* limit request size */
3315         if (num > max_write_zeroes) {
3316             num = max_write_zeroes;
3317         }
3318 
3319         ret = -ENOTSUP;
3320         /* First try the efficient write zeroes operation */
3321         if (drv->bdrv_co_write_zeroes) {
3322             ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3323         }
3324 
3325         if (ret == -ENOTSUP) {
3326             /* Fall back to bounce buffer if write zeroes is unsupported */
3327             int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
3328                                             MAX_WRITE_ZEROES_BOUNCE_BUFFER);
3329             num = MIN(num, max_xfer_len);
3330             iov.iov_len = num * BDRV_SECTOR_SIZE;
3331             if (iov.iov_base == NULL) {
3332                 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
3333                 if (iov.iov_base == NULL) {
3334                     ret = -ENOMEM;
3335                     goto fail;
3336                 }
3337                 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3338             }
3339             qemu_iovec_init_external(&qiov, &iov, 1);
3340 
3341             ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3342 
3343             /* Keep the bounce buffer around if it is big enough for
3344              * all future requests.
3345              */
3346             if (num < max_xfer_len) {
3347                 qemu_vfree(iov.iov_base);
3348                 iov.iov_base = NULL;
3349             }
3350         }
3351 
3352         sector_num += num;
3353         nb_sectors -= num;
3354     }
3355 
3356 fail:
3357     qemu_vfree(iov.iov_base);
3358     return ret;
3359 }
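
/* Worked example of the alignment logic above (illustrative numbers only):
 * with bs->bl.write_zeroes_alignment == 8 and a request for sectors
 * [5, 25), the loop issues three sub-requests: [5, 8) up to the first
 * aligned sector, [8, 24) as the aligned bulk, and [24, 25) as the
 * unaligned tail.  Each piece is additionally capped by max_write_zeroes.
 */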
3360 
3361 /*
3362  * Forwards an already correctly aligned write request to the BlockDriver.
3363  */
3364 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3365     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3366     QEMUIOVector *qiov, int flags)
3367 {
3368     BlockDriver *drv = bs->drv;
3369     bool waited;
3370     int ret;
3371 
3372     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3373     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3374 
3375     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3376     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3377     assert(!qiov || bytes == qiov->size);
3378 
3379     waited = wait_serialising_requests(req);
3380     assert(!waited || !req->serialising);
3381     assert(req->overlap_offset <= offset);
3382     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3383 
3384     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3385 
3386     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3387         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3388         qemu_iovec_is_zero(qiov)) {
3389         flags |= BDRV_REQ_ZERO_WRITE;
3390         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3391             flags |= BDRV_REQ_MAY_UNMAP;
3392         }
3393     }
3394 
3395     if (ret < 0) {
3396         /* Do nothing, write notifier decided to fail this request */
3397     } else if (flags & BDRV_REQ_ZERO_WRITE) {
3398         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3399         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3400     } else {
3401         BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3402         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3403     }
3404     BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3405 
3406     if (ret == 0 && !bs->enable_write_cache) {
3407         ret = bdrv_co_flush(bs);
3408     }
3409 
3410     bdrv_set_dirty(bs, sector_num, nb_sectors);
3411 
3412     block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
3413 
3414     if (ret >= 0) {
3415         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3416     }
3417 
3418     return ret;
3419 }
3420 
3421 /*
3422  * Handle a write request in coroutine context
3423  */
3424 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3425     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3426     BdrvRequestFlags flags)
3427 {
3428     BdrvTrackedRequest req;
3429     uint64_t align = bdrv_get_align(bs);
3430     uint8_t *head_buf = NULL;
3431     uint8_t *tail_buf = NULL;
3432     QEMUIOVector local_qiov;
3433     bool use_local_qiov = false;
3434     int ret;
3435 
3436     if (!bs->drv) {
3437         return -ENOMEDIUM;
3438     }
3439     if (bs->read_only) {
3440         return -EACCES;
3441     }
3442 
3443     ret = bdrv_check_byte_request(bs, offset, bytes);
3444     if (ret < 0) {
3445         return ret;
3446     }
3447 
3448     /* throttling disk I/O */
3449     if (bs->io_limits_enabled) {
3450         bdrv_io_limits_intercept(bs, bytes, true);
3451     }
3452 
3453     /*
3454      * Align write if necessary by performing a read-modify-write cycle.
3455      * Pad qiov with the read parts and be sure to have a tracked request not
3456      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3457      */
3458     tracked_request_begin(&req, bs, offset, bytes, true);
3459 
3460     if (offset & (align - 1)) {
3461         QEMUIOVector head_qiov;
3462         struct iovec head_iov;
3463 
3464         mark_request_serialising(&req, align);
3465         wait_serialising_requests(&req);
3466 
3467         head_buf = qemu_blockalign(bs, align);
3468         head_iov = (struct iovec) {
3469             .iov_base   = head_buf,
3470             .iov_len    = align,
3471         };
3472         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3473 
3474         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3475         ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3476                                   align, &head_qiov, 0);
3477         if (ret < 0) {
3478             goto fail;
3479         }
3480         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3481 
3482         qemu_iovec_init(&local_qiov, qiov->niov + 2);
3483         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3484         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3485         use_local_qiov = true;
3486 
3487         bytes += offset & (align - 1);
3488         offset = offset & ~(align - 1);
3489     }
3490 
3491     if ((offset + bytes) & (align - 1)) {
3492         QEMUIOVector tail_qiov;
3493         struct iovec tail_iov;
3494         size_t tail_bytes;
3495         bool waited;
3496 
3497         mark_request_serialising(&req, align);
3498         waited = wait_serialising_requests(&req);
3499         assert(!waited || !use_local_qiov);
3500 
3501         tail_buf = qemu_blockalign(bs, align);
3502         tail_iov = (struct iovec) {
3503             .iov_base   = tail_buf,
3504             .iov_len    = align,
3505         };
3506         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3507 
3508         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3509         ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3510                                   align, &tail_qiov, 0);
3511         if (ret < 0) {
3512             goto fail;
3513         }
3514         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3515 
3516         if (!use_local_qiov) {
3517             qemu_iovec_init(&local_qiov, qiov->niov + 1);
3518             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3519             use_local_qiov = true;
3520         }
3521 
3522         tail_bytes = (offset + bytes) & (align - 1);
3523         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3524 
3525         bytes = ROUND_UP(bytes, align);
3526     }
3527 
3528     if (use_local_qiov) {
        /* The RMW padding may have filled the local qiov with non-zero
         * data, so a zero-write optimization is no longer valid. */
3530         flags &= ~BDRV_REQ_ZERO_WRITE;
3531     }
3532     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3533                                use_local_qiov ? &local_qiov : qiov,
3534                                flags);
3535 
3536 fail:
3537     tracked_request_end(&req);
3538 
3539     if (use_local_qiov) {
3540         qemu_iovec_destroy(&local_qiov);
3541     }
3542     qemu_vfree(head_buf);
3543     qemu_vfree(tail_buf);
3544 
3545     return ret;
3546 }
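
/* Illustrative RMW example (hypothetical numbers): with align == 512, a
 * 1000-byte write at offset 700 reads the 512-byte block at offset 512 and
 * prepends its first 188 bytes (700 & 511), then reads the block at offset
 * 1536 and appends its last 348 bytes, so the driver finally sees a single
 * aligned write of 1536 bytes at offset 512.
 */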
3547 
3548 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3549     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3550     BdrvRequestFlags flags)
3551 {
3552     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
3553         return -EINVAL;
3554     }
3555 
3556     return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3557                               nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3558 }
3559 
3560 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3561     int nb_sectors, QEMUIOVector *qiov)
3562 {
3563     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3564 
3565     return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3566 }
3567 
3568 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3569                                       int64_t sector_num, int nb_sectors,
3570                                       BdrvRequestFlags flags)
3571 {
3572     int ret;
3573 
3574     trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3575 
3576     if (!(bs->open_flags & BDRV_O_UNMAP)) {
3577         flags &= ~BDRV_REQ_MAY_UNMAP;
3578     }
3579     if (bdrv_req_is_aligned(bs, sector_num << BDRV_SECTOR_BITS,
3580                             nb_sectors << BDRV_SECTOR_BITS)) {
3581         ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3582                                 BDRV_REQ_ZERO_WRITE | flags);
3583     } else {
3584         uint8_t *buf;
3585         QEMUIOVector local_qiov;
3586         size_t bytes = nb_sectors << BDRV_SECTOR_BITS;
3587 
3588         buf = qemu_memalign(bdrv_opt_mem_align(bs), bytes);
3589         memset(buf, 0, bytes);
3590         qemu_iovec_init(&local_qiov, 1);
3591         qemu_iovec_add(&local_qiov, buf, bytes);
3592 
3593         ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, &local_qiov,
3594                                 BDRV_REQ_ZERO_WRITE | flags);
3595         qemu_vfree(buf);
3596     }
3597     return ret;
3598 }
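
/* Minimal usage sketch (assuming coroutine context and an open bs):
 *
 *     int ret = bdrv_co_write_zeroes(bs, 0, 16, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         ... handle error ...
 *     }
 *
 * Note that BDRV_REQ_MAY_UNMAP is only honoured when the image was opened
 * with BDRV_O_UNMAP; otherwise the flag is cleared above.
 */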
3599 
3600 /**
3601  * Truncate file to 'offset' bytes (needed only for file protocols)
3602  */
3603 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3604 {
3605     BlockDriver *drv = bs->drv;
3606     int ret;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_truncate) {
        return -ENOTSUP;
    }
    if (bs->read_only) {
        return -EACCES;
    }
3613 
3614     ret = drv->bdrv_truncate(bs, offset);
3615     if (ret == 0) {
3616         ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3617         bdrv_dirty_bitmap_truncate(bs);
3618         if (bs->blk) {
3619             blk_dev_resize_cb(bs->blk);
3620         }
3621     }
3622     return ret;
3623 }
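
/* Illustrative caller (hypothetical values): growing an image to 1 GiB and
 * handling drivers that cannot truncate:
 *
 *     int ret = bdrv_truncate(bs, 1024 * 1024 * 1024LL);
 *     if (ret == -ENOTSUP) {
 *         ... driver has no bdrv_truncate implementation ...
 *     }
 */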
3624 
3625 /**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 on error or if unknown.
3628  */
3629 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3630 {
3631     BlockDriver *drv = bs->drv;
3632     if (!drv) {
3633         return -ENOMEDIUM;
3634     }
3635     if (drv->bdrv_get_allocated_file_size) {
3636         return drv->bdrv_get_allocated_file_size(bs);
3637     }
3638     if (bs->file) {
3639         return bdrv_get_allocated_file_size(bs->file);
3640     }
3641     return -ENOTSUP;
3642 }
3643 
3644 /**
3645  * Return number of sectors on success, -errno on error.
3646  */
3647 int64_t bdrv_nb_sectors(BlockDriverState *bs)
3648 {
3649     BlockDriver *drv = bs->drv;
3650 
    if (!drv) {
        return -ENOMEDIUM;
    }
3653 
3654     if (drv->has_variable_length) {
3655         int ret = refresh_total_sectors(bs, bs->total_sectors);
3656         if (ret < 0) {
3657             return ret;
3658         }
3659     }
3660     return bs->total_sectors;
3661 }
3662 
3663 /**
3664  * Return length in bytes on success, -errno on error.
3665  * The length is always a multiple of BDRV_SECTOR_SIZE.
3666  */
3667 int64_t bdrv_getlength(BlockDriverState *bs)
3668 {
3669     int64_t ret = bdrv_nb_sectors(bs);
3670 
3671     return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
3672 }
3673 
/* Return 0 as the number of sectors if no device is present or on error */
3675 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3676 {
3677     int64_t nb_sectors = bdrv_nb_sectors(bs);
3678 
3679     *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
3680 }
3681 
3682 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3683                        BlockdevOnError on_write_error)
3684 {
3685     bs->on_read_error = on_read_error;
3686     bs->on_write_error = on_write_error;
3687 }
3688 
3689 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3690 {
3691     return is_read ? bs->on_read_error : bs->on_write_error;
3692 }
3693 
3694 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3695 {
3696     BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3697 
3698     switch (on_err) {
3699     case BLOCKDEV_ON_ERROR_ENOSPC:
3700         return (error == ENOSPC) ?
3701                BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
3702     case BLOCKDEV_ON_ERROR_STOP:
3703         return BLOCK_ERROR_ACTION_STOP;
3704     case BLOCKDEV_ON_ERROR_REPORT:
3705         return BLOCK_ERROR_ACTION_REPORT;
3706     case BLOCKDEV_ON_ERROR_IGNORE:
3707         return BLOCK_ERROR_ACTION_IGNORE;
3708     default:
3709         abort();
3710     }
3711 }
3712 
3713 static void send_qmp_error_event(BlockDriverState *bs,
3714                                  BlockErrorAction action,
3715                                  bool is_read, int error)
3716 {
3717     IoOperationType optype;
3718 
3719     optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
3720     qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
3721                                    bdrv_iostatus_is_enabled(bs),
3722                                    error == ENOSPC, strerror(error),
3723                                    &error_abort);
3724 }
3725 
3726 /* This is done by device models because, while the block layer knows
3727  * about the error, it does not know whether an operation comes from
3728  * the device or the block layer (from a job, for example).
3729  */
3730 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3731                        bool is_read, int error)
3732 {
3733     assert(error >= 0);
3734 
3735     if (action == BLOCK_ERROR_ACTION_STOP) {
3736         /* First set the iostatus, so that "info block" returns an iostatus
3737          * that matches the events raised so far (an additional error iostatus
3738          * is fine, but not a lost one).
3739          */
3740         bdrv_iostatus_set_err(bs, error);
3741 
3742         /* Then raise the request to stop the VM and the event.
3743          * qemu_system_vmstop_request_prepare has two effects.  First,
3744          * it ensures that the STOP event always comes after the
3745          * BLOCK_IO_ERROR event.  Second, it ensures that even if management
3746          * can observe the STOP event and do a "cont" before the STOP
3747          * event is issued, the VM will not stop.  In this case, vm_start()
3748          * also ensures that the STOP/RESUME pair of events is emitted.
3749          */
3750         qemu_system_vmstop_request_prepare();
3751         send_qmp_error_event(bs, action, is_read, error);
3752         qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
3753     } else {
3754         send_qmp_error_event(bs, action, is_read, error);
3755     }
3756 }
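
/* The usual device-model pattern (a sketch, not taken verbatim from any
 * device) translates a negative errno into an action and reports it:
 *
 *     BlockErrorAction action = bdrv_get_error_action(bs, is_read, -ret);
 *     bdrv_error_action(bs, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... keep the request pending so it can be retried on "cont" ...
 *     }
 */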
3757 
3758 int bdrv_is_read_only(BlockDriverState *bs)
3759 {
3760     return bs->read_only;
3761 }
3762 
3763 int bdrv_is_sg(BlockDriverState *bs)
3764 {
3765     return bs->sg;
3766 }
3767 
3768 int bdrv_enable_write_cache(BlockDriverState *bs)
3769 {
3770     return bs->enable_write_cache;
3771 }
3772 
3773 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3774 {
3775     bs->enable_write_cache = wce;
3776 
3777     /* so a reopen() will preserve wce */
3778     if (wce) {
3779         bs->open_flags |= BDRV_O_CACHE_WB;
3780     } else {
3781         bs->open_flags &= ~BDRV_O_CACHE_WB;
3782     }
3783 }
3784 
3785 int bdrv_is_encrypted(BlockDriverState *bs)
3786 {
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        return 1;
    }
3789     return bs->encrypted;
3790 }
3791 
3792 int bdrv_key_required(BlockDriverState *bs)
3793 {
3794     BlockDriverState *backing_hd = bs->backing_hd;
3795 
    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key) {
        return 1;
    }
3798     return (bs->encrypted && !bs->valid_key);
3799 }
3800 
3801 int bdrv_set_key(BlockDriverState *bs, const char *key)
3802 {
3803     int ret;
3804     if (bs->backing_hd && bs->backing_hd->encrypted) {
3805         ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0) {
            return ret;
        }
        if (!bs->encrypted) {
            return 0;
        }
3810     }
3811     if (!bs->encrypted) {
3812         return -EINVAL;
3813     } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3814         return -ENOMEDIUM;
3815     }
3816     ret = bs->drv->bdrv_set_key(bs, key);
3817     if (ret < 0) {
3818         bs->valid_key = 0;
3819     } else if (!bs->valid_key) {
3820         bs->valid_key = 1;
3821         if (bs->blk) {
3822             /* call the change callback now, we skipped it on open */
            /* Call the change callback now; we skipped it on open */
3824         }
3825     }
3826     return ret;
3827 }
3828 
3829 /*
3830  * Provide an encryption key for @bs.
3831  * If @key is non-null:
3832  *     If @bs is not encrypted, fail.
3833  *     Else if the key is invalid, fail.
3834  *     Else set @bs's key to @key, replacing the existing key, if any.
3835  * If @key is null:
3836  *     If @bs is encrypted and still lacks a key, fail.
3837  *     Else do nothing.
3838  * On failure, store an error object through @errp if non-null.
3839  */
3840 void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp)
3841 {
3842     if (key) {
3843         if (!bdrv_is_encrypted(bs)) {
3844             error_setg(errp, "Node '%s' is not encrypted",
3845                       bdrv_get_device_or_node_name(bs));
3846         } else if (bdrv_set_key(bs, key) < 0) {
3847             error_set(errp, QERR_INVALID_PASSWORD);
3848         }
3849     } else {
3850         if (bdrv_key_required(bs)) {
3851             error_set(errp, ERROR_CLASS_DEVICE_ENCRYPTED,
3852                       "'%s' (%s) is encrypted",
3853                       bdrv_get_device_or_node_name(bs),
3854                       bdrv_get_encrypted_filename(bs));
3855         }
3856     }
3857 }
3858 
3859 const char *bdrv_get_format_name(BlockDriverState *bs)
3860 {
3861     return bs->drv ? bs->drv->format_name : NULL;
3862 }
3863 
3864 static int qsort_strcmp(const void *a, const void *b)
3865 {
3866     return strcmp(a, b);
3867 }
3868 
3869 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3870                          void *opaque)
3871 {
3872     BlockDriver *drv;
3873     int count = 0;
3874     int i;
3875     const char **formats = NULL;
3876 
3877     QLIST_FOREACH(drv, &bdrv_drivers, list) {
3878         if (drv->format_name) {
3879             bool found = false;
3880             int i = count;
3881             while (formats && i && !found) {
3882                 found = !strcmp(formats[--i], drv->format_name);
3883             }
3884 
3885             if (!found) {
3886                 formats = g_renew(const char *, formats, count + 1);
3887                 formats[count++] = drv->format_name;
3888             }
3889         }
3890     }
3891 
3892     qsort(formats, count, sizeof(formats[0]), qsort_strcmp);
3893 
3894     for (i = 0; i < count; i++) {
3895         it(opaque, formats[i]);
3896     }
3897 
3898     g_free(formats);
3899 }
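
/* Example callback for the iterator above (illustrative):
 *
 *     static void print_format(void *opaque, const char *name)
 *     {
 *         printf("%s\n", name);
 *     }
 *
 *     bdrv_iterate_format(print_format, NULL);
 */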
3900 
/* Find a node in the graph of BlockDriverStates by its node name */
3902 BlockDriverState *bdrv_find_node(const char *node_name)
3903 {
3904     BlockDriverState *bs;
3905 
3906     assert(node_name);
3907 
3908     QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3909         if (!strcmp(node_name, bs->node_name)) {
3910             return bs;
3911         }
3912     }
3913     return NULL;
3914 }
3915 
3916 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3917 BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp)
3918 {
3919     BlockDeviceInfoList *list, *entry;
3920     BlockDriverState *bs;
3921 
3922     list = NULL;
3923     QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3924         BlockDeviceInfo *info = bdrv_block_device_info(bs, errp);
3925         if (!info) {
3926             qapi_free_BlockDeviceInfoList(list);
3927             return NULL;
3928         }
3929         entry = g_malloc0(sizeof(*entry));
3930         entry->value = info;
3931         entry->next = list;
3932         list = entry;
3933     }
3934 
3935     return list;
3936 }
3937 
3938 BlockDriverState *bdrv_lookup_bs(const char *device,
3939                                  const char *node_name,
3940                                  Error **errp)
3941 {
3942     BlockBackend *blk;
3943     BlockDriverState *bs;
3944 
3945     if (device) {
3946         blk = blk_by_name(device);
3947 
3948         if (blk) {
3949             return blk_bs(blk);
3950         }
3951     }
3952 
3953     if (node_name) {
3954         bs = bdrv_find_node(node_name);
3955 
3956         if (bs) {
3957             return bs;
3958         }
3959     }
3960 
    error_setg(errp, "Cannot find device=%s or node_name=%s",
3962                      device ? device : "",
3963                      node_name ? node_name : "");
3964     return NULL;
3965 }
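
/* Usage sketch: QMP handlers typically accept either identifier and let
 * this helper resolve it (the has_* names are hypothetical):
 *
 *     BlockDriverState *bs = bdrv_lookup_bs(has_device ? device : NULL,
 *                                           has_node_name ? node_name : NULL,
 *                                           errp);
 *     if (!bs) {
 *         return;
 *     }
 */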
3966 
3967 /* If 'base' is in the same chain as 'top', return true. Otherwise,
3968  * return false.  If either argument is NULL, return false. */
3969 bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
3970 {
3971     while (top && top != base) {
3972         top = top->backing_hd;
3973     }
3974 
3975     return top != NULL;
3976 }
3977 
3978 BlockDriverState *bdrv_next_node(BlockDriverState *bs)
3979 {
3980     if (!bs) {
3981         return QTAILQ_FIRST(&graph_bdrv_states);
3982     }
3983     return QTAILQ_NEXT(bs, node_list);
3984 }
3985 
3986 BlockDriverState *bdrv_next(BlockDriverState *bs)
3987 {
3988     if (!bs) {
3989         return QTAILQ_FIRST(&bdrv_states);
3990     }
3991     return QTAILQ_NEXT(bs, device_list);
3992 }
3993 
3994 const char *bdrv_get_node_name(const BlockDriverState *bs)
3995 {
3996     return bs->node_name;
3997 }
3998 
3999 /* TODO check what callers really want: bs->node_name or blk_name() */
4000 const char *bdrv_get_device_name(const BlockDriverState *bs)
4001 {
4002     return bs->blk ? blk_name(bs->blk) : "";
4003 }
4004 
/* This can be used to identify nodes that might not have an associated
 * device name. Since node and device names live in the same namespace, the
 * result is unambiguous. The exception is if both are absent, in which case
 * this returns an empty (non-null) string. */
4009 const char *bdrv_get_device_or_node_name(const BlockDriverState *bs)
4010 {
4011     return bs->blk ? blk_name(bs->blk) : bs->node_name;
4012 }
4013 
4014 int bdrv_get_flags(BlockDriverState *bs)
4015 {
4016     return bs->open_flags;
4017 }
4018 
4019 int bdrv_flush_all(void)
4020 {
4021     BlockDriverState *bs = NULL;
4022     int result = 0;
4023 
4024     while ((bs = bdrv_next(bs))) {
4025         AioContext *aio_context = bdrv_get_aio_context(bs);
4026         int ret;
4027 
4028         aio_context_acquire(aio_context);
4029         ret = bdrv_flush(bs);
4030         if (ret < 0 && !result) {
4031             result = ret;
4032         }
4033         aio_context_release(aio_context);
4034     }
4035 
4036     return result;
4037 }
4038 
4039 int bdrv_has_zero_init_1(BlockDriverState *bs)
4040 {
4041     return 1;
4042 }
4043 
4044 int bdrv_has_zero_init(BlockDriverState *bs)
4045 {
4046     assert(bs->drv);
4047 
    /* If BS is a copy-on-write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
4050     if (bs->backing_hd) {
4051         return 0;
4052     }
4053     if (bs->drv->bdrv_has_zero_init) {
4054         return bs->drv->bdrv_has_zero_init(bs);
4055     }
4056 
4057     /* safe default */
4058     return 0;
4059 }
4060 
4061 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
4062 {
4063     BlockDriverInfo bdi;
4064 
4065     if (bs->backing_hd) {
4066         return false;
4067     }
4068 
4069     if (bdrv_get_info(bs, &bdi) == 0) {
4070         return bdi.unallocated_blocks_are_zero;
4071     }
4072 
4073     return false;
4074 }
4075 
4076 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
4077 {
4078     BlockDriverInfo bdi;
4079 
4080     if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
4081         return false;
4082     }
4083 
4084     if (bdrv_get_info(bs, &bdi) == 0) {
4085         return bdi.can_write_zeroes_with_unmap;
4086     }
4087 
4088     return false;
4089 }
4090 
4091 typedef struct BdrvCoGetBlockStatusData {
4092     BlockDriverState *bs;
4093     BlockDriverState *base;
4094     int64_t sector_num;
4095     int nb_sectors;
4096     int *pnum;
4097     int64_t ret;
4098     bool done;
4099 } BdrvCoGetBlockStatusData;
4100 
4101 /*
4102  * Returns the allocation status of the specified sectors.
4103  * Drivers not implementing the functionality are assumed to not support
4104  * backing files, hence all their sectors are reported as allocated.
4105  *
4106  * If 'sector_num' is beyond the end of the disk image the return value is 0
4107  * and 'pnum' is set to 0.
4108  *
4109  * 'pnum' is set to the number of sectors (including and immediately following
4110  * the specified sector) that are known to be in the same
4111  * allocated/unallocated state.
4112  *
4113  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
4114  * beyond the end of the disk image it will be clamped.
4115  */
4116 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
4117                                                      int64_t sector_num,
4118                                                      int nb_sectors, int *pnum)
4119 {
4120     int64_t total_sectors;
4121     int64_t n;
4122     int64_t ret, ret2;
4123 
4124     total_sectors = bdrv_nb_sectors(bs);
4125     if (total_sectors < 0) {
4126         return total_sectors;
4127     }
4128 
4129     if (sector_num >= total_sectors) {
4130         *pnum = 0;
4131         return 0;
4132     }
4133 
4134     n = total_sectors - sector_num;
4135     if (n < nb_sectors) {
4136         nb_sectors = n;
4137     }
4138 
4139     if (!bs->drv->bdrv_co_get_block_status) {
4140         *pnum = nb_sectors;
4141         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
4142         if (bs->drv->protocol_name) {
4143             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
4144         }
4145         return ret;
4146     }
4147 
4148     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
4149     if (ret < 0) {
4150         *pnum = 0;
4151         return ret;
4152     }
4153 
4154     if (ret & BDRV_BLOCK_RAW) {
4155         assert(ret & BDRV_BLOCK_OFFSET_VALID);
4156         return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4157                                      *pnum, pnum);
4158     }
4159 
4160     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
4161         ret |= BDRV_BLOCK_ALLOCATED;
4162     }
4163 
4164     if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
4165         if (bdrv_unallocated_blocks_are_zero(bs)) {
4166             ret |= BDRV_BLOCK_ZERO;
4167         } else if (bs->backing_hd) {
4168             BlockDriverState *bs2 = bs->backing_hd;
4169             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
4170             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
4171                 ret |= BDRV_BLOCK_ZERO;
4172             }
4173         }
4174     }
4175 
4176     if (bs->file &&
4177         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
4178         (ret & BDRV_BLOCK_OFFSET_VALID)) {
4179         int file_pnum;
4180 
4181         ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4182                                         *pnum, &file_pnum);
4183         if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information;
             * it is useful but not necessary.
             */
4187             if (!file_pnum) {
4188                 /* !file_pnum indicates an offset at or beyond the EOF; it is
4189                  * perfectly valid for the format block driver to point to such
4190                  * offsets, so catch it and mark everything as zero */
4191                 ret |= BDRV_BLOCK_ZERO;
4192             } else {
4193                 /* Limit request to the range reported by the protocol driver */
4194                 *pnum = file_pnum;
4195                 ret |= (ret2 & BDRV_BLOCK_ZERO);
4196             }
4197         }
4198     }
4199 
4200     return ret;
4201 }
4202 
4203 /* Coroutine wrapper for bdrv_get_block_status() */
4204 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4205 {
4206     BdrvCoGetBlockStatusData *data = opaque;
4207     BlockDriverState *bs = data->bs;
4208 
4209     data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4210                                          data->pnum);
4211     data->done = true;
4212 }
4213 
4214 /*
4215  * Synchronous wrapper around bdrv_co_get_block_status().
4216  *
4217  * See bdrv_co_get_block_status() for details.
4218  */
4219 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4220                               int nb_sectors, int *pnum)
4221 {
4222     Coroutine *co;
4223     BdrvCoGetBlockStatusData data = {
4224         .bs = bs,
4225         .sector_num = sector_num,
4226         .nb_sectors = nb_sectors,
4227         .pnum = pnum,
4228         .done = false,
4229     };
4230 
4231     if (qemu_in_coroutine()) {
4232         /* Fast-path if already in coroutine context */
4233         bdrv_get_block_status_co_entry(&data);
4234     } else {
4235         AioContext *aio_context = bdrv_get_aio_context(bs);
4236 
4237         co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4238         qemu_coroutine_enter(co, &data);
4239         while (!data.done) {
4240             aio_poll(aio_context, true);
4241         }
4242     }
4243     return data.ret;
4244 }
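
/* Interpreting the result (illustrative sketch):
 *
 *     int pnum;
 *     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &pnum);
 *     if (ret < 0) {
 *         ... error ...
 *     } else if (ret & BDRV_BLOCK_ZERO) {
 *         ... the next pnum sectors read as zeroes ...
 *     } else if (ret & BDRV_BLOCK_DATA) {
 *         ... the next pnum sectors contain data from this layer ...
 *     }
 */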
4245 
4246 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4247                                    int nb_sectors, int *pnum)
4248 {
4249     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4250     if (ret < 0) {
4251         return ret;
4252     }
4253     return !!(ret & BDRV_BLOCK_ALLOCATED);
4254 }
4255 
4256 /*
4257  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4258  *
4259  * Return true if the given sector is allocated in any image between
4260  * BASE and TOP (inclusive).  BASE can be NULL to check if the given
4261  * sector is allocated in any image of the chain.  Return false otherwise.
4262  *
4263  * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
4267  */
4268 int bdrv_is_allocated_above(BlockDriverState *top,
4269                             BlockDriverState *base,
4270                             int64_t sector_num,
4271                             int nb_sectors, int *pnum)
4272 {
4273     BlockDriverState *intermediate;
4274     int ret, n = nb_sectors;
4275 
4276     intermediate = top;
4277     while (intermediate && intermediate != base) {
4278         int pnum_inter;
4279         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4280                                 &pnum_inter);
4281         if (ret < 0) {
4282             return ret;
4283         } else if (ret) {
4284             *pnum = pnum_inter;
4285             return 1;
4286         }
4287 
        /*
         * [sector_num, nb_sectors] is unallocated on top but an intermediate
         * image might have [sector_num+x, nb_sectors-x] allocated.
         */
4294         if (n > pnum_inter &&
4295             (intermediate == top ||
4296              sector_num + pnum_inter < intermediate->total_sectors)) {
4297             n = pnum_inter;
4298         }
4299 
4300         intermediate = intermediate->backing_hd;
4301     }
4302 
4303     *pnum = n;
4304     return 0;
4305 }
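
/* Callers usually walk a range in pnum-sized steps (sketch):
 *
 *     while (nb_sectors > 0) {
 *         int pnum;
 *         int ret = bdrv_is_allocated_above(top, base, sector_num,
 *                                           nb_sectors, &pnum);
 *         if (ret < 0) {
 *             break;
 *         }
 *         ... ret != 0 means [sector_num, sector_num + pnum) is allocated ...
 *         sector_num += pnum;
 *         nb_sectors -= pnum;
 *     }
 */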
4306 
4307 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4308 {
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        return bs->backing_file;
    } else if (bs->encrypted) {
        return bs->filename;
    } else {
        return NULL;
    }
4315 }
4316 
4317 void bdrv_get_backing_filename(BlockDriverState *bs,
4318                                char *filename, int filename_size)
4319 {
4320     pstrcpy(filename, filename_size, bs->backing_file);
4321 }
4322 
4323 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4324                           const uint8_t *buf, int nb_sectors)
4325 {
4326     BlockDriver *drv = bs->drv;
4327     int ret;
4328 
4329     if (!drv) {
4330         return -ENOMEDIUM;
4331     }
4332     if (!drv->bdrv_write_compressed) {
4333         return -ENOTSUP;
4334     }
4335     ret = bdrv_check_request(bs, sector_num, nb_sectors);
4336     if (ret < 0) {
4337         return ret;
4338     }
4339 
4340     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4341 
4342     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4343 }
4344 
4345 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4346 {
4347     BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_get_info) {
        return -ENOTSUP;
    }
4352     memset(bdi, 0, sizeof(*bdi));
4353     return drv->bdrv_get_info(bs, bdi);
4354 }
4355 
4356 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4357 {
4358     BlockDriver *drv = bs->drv;
4359     if (drv && drv->bdrv_get_specific_info) {
4360         return drv->bdrv_get_specific_info(bs);
4361     }
4362     return NULL;
4363 }
4364 
4365 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4366                       int64_t pos, int size)
4367 {
4368     QEMUIOVector qiov;
4369     struct iovec iov = {
4370         .iov_base   = (void *) buf,
4371         .iov_len    = size,
4372     };
4373 
4374     qemu_iovec_init_external(&qiov, &iov, 1);
4375     return bdrv_writev_vmstate(bs, &qiov, pos);
4376 }
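
/* Usage sketch (hypothetical buffer): savevm code writes device state at a
 * position of its choosing:
 *
 *     uint8_t buf[512];
 *     ... fill buf ...
 *     int ret = bdrv_save_vmstate(bs, buf, pos, sizeof(buf));
 */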
4377 
4378 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4379 {
4380     BlockDriver *drv = bs->drv;
4381 
4382     if (!drv) {
4383         return -ENOMEDIUM;
4384     } else if (drv->bdrv_save_vmstate) {
4385         return drv->bdrv_save_vmstate(bs, qiov, pos);
4386     } else if (bs->file) {
4387         return bdrv_writev_vmstate(bs->file, qiov, pos);
4388     }
4389 
4390     return -ENOTSUP;
4391 }
4392 
4393 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4394                       int64_t pos, int size)
4395 {
4396     BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_load_vmstate) {
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    }
    if (bs->file) {
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    }
4403     return -ENOTSUP;
4404 }
4405 
4406 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4407 {
4408     if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4409         return;
4410     }
4411 
4412     bs->drv->bdrv_debug_event(bs, event);
4413 }
4414 
4415 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4416                           const char *tag)
4417 {
4418     while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4419         bs = bs->file;
4420     }
4421 
4422     if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4423         return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4424     }
4425 
4426     return -ENOTSUP;
4427 }
4428 
4429 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4430 {
4431     while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4432         bs = bs->file;
4433     }
4434 
4435     if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4436         return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4437     }
4438 
4439     return -ENOTSUP;
4440 }
4441 
4442 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4443 {
4444     while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4445         bs = bs->file;
4446     }
4447 
4448     if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4449         return bs->drv->bdrv_debug_resume(bs, tag);
4450     }
4451 
4452     return -ENOTSUP;
4453 }
4454 
4455 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4456 {
4457     while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4458         bs = bs->file;
4459     }
4460 
4461     if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4462         return bs->drv->bdrv_debug_is_suspended(bs, tag);
4463     }
4464 
4465     return false;
4466 }
4467 
4468 int bdrv_is_snapshot(BlockDriverState *bs)
4469 {
4470     return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4471 }
4472 
/* backing_file can be relative, absolute, or a protocol.  If it is
4474  * relative, it must be relative to the chain.  So, passing in bs->filename
4475  * from a BDS as backing_file should not be done, as that may be relative to
4476  * the CWD rather than the chain. */
4477 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4478         const char *backing_file)
4479 {
4480     char *filename_full = NULL;
4481     char *backing_file_full = NULL;
4482     char *filename_tmp = NULL;
4483     int is_protocol = 0;
4484     BlockDriverState *curr_bs = NULL;
4485     BlockDriverState *retval = NULL;
4486 
4487     if (!bs || !bs->drv || !backing_file) {
4488         return NULL;
4489     }
4490 
4491     filename_full     = g_malloc(PATH_MAX);
4492     backing_file_full = g_malloc(PATH_MAX);
4493     filename_tmp      = g_malloc(PATH_MAX);
4494 
4495     is_protocol = path_has_protocol(backing_file);
4496 
4497     for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4498 
4499         /* If either of the filename paths is actually a protocol, then
4500          * compare unmodified paths; otherwise make paths relative */
4501         if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4502             if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4503                 retval = curr_bs->backing_hd;
4504                 break;
4505             }
4506         } else {
4507             /* If not an absolute filename path, make it relative to the current
4508              * image's filename path */
4509             path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4510                          backing_file);
4511 
4512             /* We are going to compare absolute pathnames */
4513             if (!realpath(filename_tmp, filename_full)) {
4514                 continue;
4515             }
4516 
4517             /* We need to make sure the backing filename we are comparing against
4518              * is relative to the current image filename (or absolute) */
4519             path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4520                          curr_bs->backing_file);
4521 
4522             if (!realpath(filename_tmp, backing_file_full)) {
4523                 continue;
4524             }
4525 
4526             if (strcmp(backing_file_full, filename_full) == 0) {
4527                 retval = curr_bs->backing_hd;
4528                 break;
4529             }
4530         }
4531     }
4532 
4533     g_free(filename_full);
4534     g_free(backing_file_full);
4535     g_free(filename_tmp);
4536     return retval;
4537 }
4538 
4539 int bdrv_get_backing_file_depth(BlockDriverState *bs)
4540 {
4541     if (!bs->drv) {
4542         return 0;
4543     }
4544 
4545     if (!bs->backing_hd) {
4546         return 0;
4547     }
4548 
4549     return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4550 }
4551 
4552 /**************************************************************/
4553 /* async I/Os */
4554 
4555 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4556                            QEMUIOVector *qiov, int nb_sectors,
4557                            BlockCompletionFunc *cb, void *opaque)
4558 {
4559     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4560 
4561     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4562                                  cb, opaque, false);
4563 }
4564 
4565 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4566                             QEMUIOVector *qiov, int nb_sectors,
4567                             BlockCompletionFunc *cb, void *opaque)
4568 {
4569     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4570 
4571     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4572                                  cb, opaque, true);
4573 }
4574 
4575 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4576         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4577         BlockCompletionFunc *cb, void *opaque)
4578 {
4579     trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4580 
4581     return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4582                                  BDRV_REQ_ZERO_WRITE | flags,
4583                                  cb, opaque, true);
4584 }
4585 
4586 
4587 typedef struct MultiwriteCB {
4588     int error;
4589     int num_requests;
4590     int num_callbacks;
4591     struct {
4592         BlockCompletionFunc *cb;
4593         void *opaque;
4594         QEMUIOVector *free_qiov;
4595     } callbacks[];
4596 } MultiwriteCB;
4597 
4598 static void multiwrite_user_cb(MultiwriteCB *mcb)
4599 {
4600     int i;
4601 
4602     for (i = 0; i < mcb->num_callbacks; i++) {
4603         mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4604         if (mcb->callbacks[i].free_qiov) {
4605             qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4606         }
4607         g_free(mcb->callbacks[i].free_qiov);
4608     }
4609 }
4610 
4611 static void multiwrite_cb(void *opaque, int ret)
4612 {
4613     MultiwriteCB *mcb = opaque;
4614 
4615     trace_multiwrite_cb(mcb, ret);
4616 
4617     if (ret < 0 && !mcb->error) {
4618         mcb->error = ret;
4619     }
4620 
4621     mcb->num_requests--;
4622     if (mcb->num_requests == 0) {
4623         multiwrite_user_cb(mcb);
4624         g_free(mcb);
4625     }
4626 }
4627 
4628 static int multiwrite_req_compare(const void *a, const void *b)
4629 {
4630     const BlockRequest *req1 = a, *req2 = b;
4631 
4632     /*
4633      * Note that we can't simply subtract req2->sector from req1->sector
4634      * here as that could overflow the return value.
4635      */
4636     if (req1->sector > req2->sector) {
4637         return 1;
4638     } else if (req1->sector < req2->sector) {
4639         return -1;
4640     } else {
4641         return 0;
4642     }
4643 }
4644 
4645 /*
4646  * Takes a bunch of requests and tries to merge them. Returns the number of
4647  * requests that remain after merging.
4648  */
4649 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4650     int num_reqs, MultiwriteCB *mcb)
4651 {
4652     int i, outidx;
4653 
4654     // Sort requests by start sector
4655     qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4656 
    // Check if adjacent (sequential or overlapping) requests touch the same
    // clusters. If so, combine them into a single request.
4659     outidx = 0;
4660     for (i = 1; i < num_reqs; i++) {
4661         int merge = 0;
4662         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4663 
4664         // Handle exactly sequential writes and overlapping writes.
4665         if (reqs[i].sector <= oldreq_last) {
4666             merge = 1;
4667         }
4668 
4669         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4670             merge = 0;
4671         }
4672 
4673         if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
4674             reqs[i].nb_sectors > bs->bl.max_transfer_length) {
4675             merge = 0;
4676         }
4677 
4678         if (merge) {
4679             size_t size;
4680             QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4681             qemu_iovec_init(qiov,
4682                 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4683 
4684             // Add the first request to the merged one. If the requests are
4685             // overlapping, drop the last sectors of the first request.
4686             size = (reqs[i].sector - reqs[outidx].sector) << 9;
4687             qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4688 
            // We shouldn't need to add any zeros between the two requests:
            // the merge condition guarantees they are sequential or overlap
            assert(reqs[i].sector <= oldreq_last);
4691 
4692             // Add the second request
4693             qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4694 
4695             // Add tail of first request, if necessary
4696             if (qiov->size < reqs[outidx].qiov->size) {
4697                 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
4698                                   reqs[outidx].qiov->size - qiov->size);
4699             }
4700 
4701             reqs[outidx].nb_sectors = qiov->size >> 9;
4702             reqs[outidx].qiov = qiov;
4703 
4704             mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4705         } else {
4706             outidx++;
4707             reqs[outidx].sector     = reqs[i].sector;
4708             reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4709             reqs[outidx].qiov       = reqs[i].qiov;
4710         }
4711     }
4712 
4713     block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);
4714 
4715     return outidx + 1;
4716 }
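
/* Worked merge example (illustrative): two sorted requests for sectors
 * [0, 8) and [8, 8) satisfy reqs[i].sector <= oldreq_last (8 <= 8), so they
 * collapse into one request for sectors [0, 16) whose qiov concatenates both
 * original vectors, and the merged request count drops from 2 to 1.
 */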
4717 
4718 /*
4719  * Submit multiple AIO write requests at once.
4720  *
4721  * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error, this function returns -1, and the individual
 * requests may or may not have been submitted yet. In particular, this means
 * that the callback will be called for some of the requests and not for
 * others. The caller must check the error field of each BlockRequest to wait
 * for the right callbacks (if error != 0, no callback will be called).
4727  *
4728  * The implementation may modify the contents of the reqs array, e.g. to merge
4729  * requests. However, the fields opaque and error are left unmodified as they
4730  * are used to signal failure for a single request to the caller.
4731  */
4732 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4733 {
4734     MultiwriteCB *mcb;
4735     int i;
4736 
4737     /* don't submit writes if we don't have a medium */
4738     if (bs->drv == NULL) {
4739         for (i = 0; i < num_reqs; i++) {
4740             reqs[i].error = -ENOMEDIUM;
4741         }
4742         return -1;
4743     }
4744 
4745     if (num_reqs == 0) {
4746         return 0;
4747     }
4748 
4749     // Create MultiwriteCB structure
4750     mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4751     mcb->num_requests = 0;
4752     mcb->num_callbacks = num_reqs;
4753 
4754     for (i = 0; i < num_reqs; i++) {
4755         mcb->callbacks[i].cb = reqs[i].cb;
4756         mcb->callbacks[i].opaque = reqs[i].opaque;
4757     }
4758 
    // Check for mergeable requests
4760     num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4761 
4762     trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4763 
4764     /* Run the aio requests. */
4765     mcb->num_requests = num_reqs;
4766     for (i = 0; i < num_reqs; i++) {
4767         bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4768                               reqs[i].nb_sectors, reqs[i].flags,
4769                               multiwrite_cb, mcb,
4770                               true);
4771     }
4772 
4773     return 0;
4774 }
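
/* Submission sketch (hypothetical caller, e.g. a virtio-blk-style model;
 * qiov0/qiov1, my_cb, req0/req1 are assumed to exist):
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_cb, .opaque = req0 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_cb, .opaque = req1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         ... inspect reqs[i].error for requests never submitted ...
 *     }
 */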
4775 
4776 void bdrv_aio_cancel(BlockAIOCB *acb)
4777 {
4778     qemu_aio_ref(acb);
4779     bdrv_aio_cancel_async(acb);
4780     while (acb->refcnt > 1) {
4781         if (acb->aiocb_info->get_aio_context) {
4782             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
4783         } else if (acb->bs) {
4784             aio_poll(bdrv_get_aio_context(acb->bs), true);
4785         } else {
4786             abort();
4787         }
4788     }
4789     qemu_aio_unref(acb);
4790 }
4791 
/* Async version of aio cancel. This never blocks the caller: if the acb
 * implements cancel_async it is invoked, otherwise we do nothing and let the
 * request complete normally. In either case the completion callback must
 * still be called. */
4795 void bdrv_aio_cancel_async(BlockAIOCB *acb)
4796 {
4797     if (acb->aiocb_info->cancel_async) {
4798         acb->aiocb_info->cancel_async(acb);
4799     }
4800 }
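
/* Callers that must not block use the async variant and rely on the
 * completion callback still firing exactly once (sketch):
 *
 *     bdrv_aio_cancel_async(acb);   // returns immediately
 *     ... completion is signalled later through acb's callback ...
 */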
4801 
4802 /**************************************************************/
4803 /* async block device emulation */
4804 
4805 typedef struct BlockAIOCBSync {
4806     BlockAIOCB common;
4807     QEMUBH *bh;
4808     int ret;
4809     /* vector translation state */
4810     QEMUIOVector *qiov;
4811     uint8_t *bounce;
4812     int is_write;
4813 } BlockAIOCBSync;
4814 
4815 static const AIOCBInfo bdrv_em_aiocb_info = {
4816     .aiocb_size         = sizeof(BlockAIOCBSync),
4817 };
4818 
4819 static void bdrv_aio_bh_cb(void *opaque)
4820 {
4821     BlockAIOCBSync *acb = opaque;
4822 
4823     if (!acb->is_write && acb->ret >= 0) {
4824         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4825     }
4826     qemu_vfree(acb->bounce);
4827     acb->common.cb(acb->common.opaque, acb->ret);
4828     qemu_bh_delete(acb->bh);
4829     acb->bh = NULL;
4830     qemu_aio_unref(acb);
4831 }
4832 
4833 static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4834                                       int64_t sector_num,
4835                                       QEMUIOVector *qiov,
4836                                       int nb_sectors,
4837                                       BlockCompletionFunc *cb,
4838                                       void *opaque,
4839                                       int is_write)
{
4842     BlockAIOCBSync *acb;
4843 
4844     acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4845     acb->is_write = is_write;
4846     acb->qiov = qiov;
4847     acb->bounce = qemu_try_blockalign(bs, qiov->size);
4848     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
4849 
4850     if (acb->bounce == NULL) {
4851         acb->ret = -ENOMEM;
4852     } else if (is_write) {
4853         qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4854         acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4855     } else {
4856         acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4857     }
4858 
4859     qemu_bh_schedule(acb->bh);
4860 
4861     return &acb->common;
4862 }
4863 
4864 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4865         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4866         BlockCompletionFunc *cb, void *opaque)
4867 {
4868     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4869 }
4870 
4871 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4872         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4873         BlockCompletionFunc *cb, void *opaque)
4874 {
4875     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4876 }
4877 
4878 
4879 typedef struct BlockAIOCBCoroutine {
4880     BlockAIOCB common;
4881     BlockRequest req;
4882     bool is_write;
4883     bool need_bh;
4884     bool *done;
4885     QEMUBH* bh;
4886 } BlockAIOCBCoroutine;
4887 
4888 static const AIOCBInfo bdrv_em_co_aiocb_info = {
4889     .aiocb_size         = sizeof(BlockAIOCBCoroutine),
4890 };
4891 
4892 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
4893 {
4894     if (!acb->need_bh) {
4895         acb->common.cb(acb->common.opaque, acb->req.error);
4896         qemu_aio_unref(acb);
4897     }
4898 }
4899 
4900 static void bdrv_co_em_bh(void *opaque)
4901 {
4902     BlockAIOCBCoroutine *acb = opaque;
4903 
4904     assert(!acb->need_bh);
4905     qemu_bh_delete(acb->bh);
4906     bdrv_co_complete(acb);
4907 }
4908 
4909 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
4910 {
4911     acb->need_bh = false;
4912     if (acb->req.error != -EINPROGRESS) {
4913         BlockDriverState *bs = acb->common.bs;
4914 
4915         acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4916         qemu_bh_schedule(acb->bh);
4917     }
4918 }
4919 
4920 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4921 static void coroutine_fn bdrv_co_do_rw(void *opaque)
4922 {
4923     BlockAIOCBCoroutine *acb = opaque;
4924     BlockDriverState *bs = acb->common.bs;
4925 
4926     if (!acb->is_write) {
4927         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4928             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4929     } else {
4930         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4931             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4932     }
4933 
4934     bdrv_co_complete(acb);
4935 }
4936 
4937 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4938                                          int64_t sector_num,
4939                                          QEMUIOVector *qiov,
4940                                          int nb_sectors,
4941                                          BdrvRequestFlags flags,
4942                                          BlockCompletionFunc *cb,
4943                                          void *opaque,
4944                                          bool is_write)
4945 {
4946     Coroutine *co;
4947     BlockAIOCBCoroutine *acb;
4948 
4949     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4950     acb->need_bh = true;
4951     acb->req.error = -EINPROGRESS;
4952     acb->req.sector = sector_num;
4953     acb->req.nb_sectors = nb_sectors;
4954     acb->req.qiov = qiov;
4955     acb->req.flags = flags;
4956     acb->is_write = is_write;
4957 
4958     co = qemu_coroutine_create(bdrv_co_do_rw);
4959     qemu_coroutine_enter(co, acb);
4960 
4961     bdrv_co_maybe_schedule_bh(acb);
4962     return &acb->common;
4963 }
4964 
4965 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4966 {
4967     BlockAIOCBCoroutine *acb = opaque;
4968     BlockDriverState *bs = acb->common.bs;
4969 
4970     acb->req.error = bdrv_co_flush(bs);
4971     bdrv_co_complete(acb);
4972 }
4973 
4974 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4975         BlockCompletionFunc *cb, void *opaque)
4976 {
4977     trace_bdrv_aio_flush(bs, opaque);
4978 
4979     Coroutine *co;
4980     BlockAIOCBCoroutine *acb;
4981 
4982     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4983     acb->need_bh = true;
4984     acb->req.error = -EINPROGRESS;
4985 
4986     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4987     qemu_coroutine_enter(co, acb);
4988 
4989     bdrv_co_maybe_schedule_bh(acb);
4990     return &acb->common;
4991 }
4992 
4993 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4994 {
4995     BlockAIOCBCoroutine *acb = opaque;
4996     BlockDriverState *bs = acb->common.bs;
4997 
4998     acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4999     bdrv_co_complete(acb);
5000 }
5001 
5002 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
5003         int64_t sector_num, int nb_sectors,
5004         BlockCompletionFunc *cb, void *opaque)
5005 {
5006     Coroutine *co;
5007     BlockAIOCBCoroutine *acb;
5008 
5009     trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
5010 
5011     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
5012     acb->need_bh = true;
5013     acb->req.error = -EINPROGRESS;
5014     acb->req.sector = sector_num;
5015     acb->req.nb_sectors = nb_sectors;
5016     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
5017     qemu_coroutine_enter(co, acb);
5018 
5019     bdrv_co_maybe_schedule_bh(acb);
5020     return &acb->common;
5021 }
5022 
5023 void bdrv_init(void)
5024 {
5025     module_call_init(MODULE_INIT_BLOCK);
5026 }
5027 
5028 void bdrv_init_with_whitelist(void)
5029 {
5030     use_bdrv_whitelist = 1;
5031     bdrv_init();
5032 }
5033 
5034 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
5035                    BlockCompletionFunc *cb, void *opaque)
5036 {
5037     BlockAIOCB *acb;
5038 
5039     acb = g_slice_alloc(aiocb_info->aiocb_size);
5040     acb->aiocb_info = aiocb_info;
5041     acb->bs = bs;
5042     acb->cb = cb;
5043     acb->opaque = opaque;
5044     acb->refcnt = 1;
5045     return acb;
5046 }
5047 
5048 void qemu_aio_ref(void *p)
5049 {
5050     BlockAIOCB *acb = p;
5051     acb->refcnt++;
5052 }
5053 
5054 void qemu_aio_unref(void *p)
5055 {
5056     BlockAIOCB *acb = p;
5057     assert(acb->refcnt > 0);
5058     if (--acb->refcnt == 0) {
5059         g_slice_free1(acb->aiocb_info->aiocb_size, acb);
5060     }
5061 }
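
/* A minimal sketch of the AIOCB reference-count lifecycle, assuming a
 * hypothetical my_aiocb_info descriptor:
 *
 *     BlockAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     qemu_aio_ref(acb);      // refcnt 1 -> 2, e.g. to survive a yield
 *     ...
 *     qemu_aio_unref(acb);    // drop the extra reference
 *     qemu_aio_unref(acb);    // refcnt reaches 0, the AIOCB is freed
 *
 * qemu_aio_get() returns with refcnt == 1, so every qemu_aio_ref() must be
 * balanced by exactly one additional qemu_aio_unref(). */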
5062 
5063 /**************************************************************/
5064 /* Coroutine block device emulation */
5065 
5066 typedef struct CoroutineIOCompletion {
5067     Coroutine *coroutine;
5068     int ret;
5069 } CoroutineIOCompletion;
5070 
5071 static void bdrv_co_io_em_complete(void *opaque, int ret)
5072 {
5073     CoroutineIOCompletion *co = opaque;
5074 
5075     co->ret = ret;
5076     qemu_coroutine_enter(co->coroutine, NULL);
5077 }
5078 
5079 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
5080                                       int nb_sectors, QEMUIOVector *iov,
5081                                       bool is_write)
5082 {
5083     CoroutineIOCompletion co = {
5084         .coroutine = qemu_coroutine_self(),
5085     };
5086     BlockAIOCB *acb;
5087 
5088     if (is_write) {
5089         acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
5090                                        bdrv_co_io_em_complete, &co);
5091     } else {
5092         acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
5093                                       bdrv_co_io_em_complete, &co);
5094     }
5095 
5096     trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
5097     if (!acb) {
5098         return -EIO;
5099     }
5100     qemu_coroutine_yield();
5101 
5102     return co.ret;
5103 }
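
/* bdrv_co_io_em() together with bdrv_co_io_em_complete() bridges the two I/O
 * models: the coroutine submits the request through the driver's AIO
 * interface, parks itself with qemu_coroutine_yield(), and the completion
 * callback stores the return value and re-enters the coroutine.  From the
 * coroutine's point of view the asynchronous request therefore behaves like
 * a blocking call that returns co.ret. */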
5104 
5105 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
5106                                          int64_t sector_num, int nb_sectors,
5107                                          QEMUIOVector *iov)
5108 {
5109     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
5110 }
5111 
5112 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
5113                                          int64_t sector_num, int nb_sectors,
5114                                          QEMUIOVector *iov)
5115 {
5116     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
5117 }
5118 
5119 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
5120 {
5121     RwCo *rwco = opaque;
5122 
5123     rwco->ret = bdrv_co_flush(rwco->bs);
5124 }
5125 
5126 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
5127 {
5128     int ret;
5129 
5130     if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
5131         return 0;
5132     }
5133 
5134     /* Write back cached data to the OS even with cache=unsafe */
5135     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
5136     if (bs->drv->bdrv_co_flush_to_os) {
5137         ret = bs->drv->bdrv_co_flush_to_os(bs);
5138         if (ret < 0) {
5139             return ret;
5140         }
5141     }
5142 
5143     /* But don't actually force it to the disk with cache=unsafe */
5144     if (bs->open_flags & BDRV_O_NO_FLUSH) {
5145         goto flush_parent;
5146     }
5147 
5148     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
5149     if (bs->drv->bdrv_co_flush_to_disk) {
5150         ret = bs->drv->bdrv_co_flush_to_disk(bs);
5151     } else if (bs->drv->bdrv_aio_flush) {
5152         BlockAIOCB *acb;
5153         CoroutineIOCompletion co = {
5154             .coroutine = qemu_coroutine_self(),
5155         };
5156 
5157         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
5158         if (acb == NULL) {
5159             ret = -EIO;
5160         } else {
5161             qemu_coroutine_yield();
5162             ret = co.ret;
5163         }
5164     } else {
5165         /*
5166          * Some block drivers always operate in either writethrough or unsafe
5167          * mode and therefore don't support bdrv_flush. Usually QEMU doesn't
5168          * know how the server works (because the behaviour is hardcoded or
5169          * depends on server-side configuration), so we can't ensure that
5170          * everything is safe on disk. Returning an error doesn't work because
5171          * that would break guests even if the server operates in writethrough
5172          * mode.
5173          *
5174          * Let's hope the user knows what he's doing.
5175          */
5176         ret = 0;
5177     }
5178     if (ret < 0) {
5179         return ret;
5180     }
5181 
5182     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
5183      * in the case of cache=unsafe, so there are no useless flushes.
5184      */
5185 flush_parent:
5186     return bdrv_co_flush(bs->file);
5187 }
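
/* To summarize the cascade above: data is always flushed from QEMU's own
 * caches to the OS first, then from the OS to the disk unless BDRV_O_NO_FLUSH
 * (cache=unsafe) suppresses it, and finally the same sequence is applied
 * recursively to bs->file, the underlying protocol layer. */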
5188 
5189 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
5190 {
5191     Error *local_err = NULL;
5192     int ret;
5193 
5194     if (!bs->drv)  {
5195         return;
5196     }
5197 
5198     if (!(bs->open_flags & BDRV_O_INCOMING)) {
5199         return;
5200     }
5201     bs->open_flags &= ~BDRV_O_INCOMING;
5202 
5203     if (bs->drv->bdrv_invalidate_cache) {
5204         bs->drv->bdrv_invalidate_cache(bs, &local_err);
5205     } else if (bs->file) {
5206         bdrv_invalidate_cache(bs->file, &local_err);
5207     }
5208     if (local_err) {
5209         error_propagate(errp, local_err);
5210         return;
5211     }
5212 
5213     ret = refresh_total_sectors(bs, bs->total_sectors);
5214     if (ret < 0) {
5215         error_setg_errno(errp, -ret, "Could not refresh total sector count");
5216         return;
5217     }
5218 }
5219 
5220 void bdrv_invalidate_cache_all(Error **errp)
5221 {
5222     BlockDriverState *bs;
5223     Error *local_err = NULL;
5224 
5225     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5226         AioContext *aio_context = bdrv_get_aio_context(bs);
5227 
5228         aio_context_acquire(aio_context);
5229         bdrv_invalidate_cache(bs, &local_err);
5230         aio_context_release(aio_context);
5231         if (local_err) {
5232             error_propagate(errp, local_err);
5233             return;
5234         }
5235     }
5236 }
5237 
5238 int bdrv_flush(BlockDriverState *bs)
5239 {
5240     Coroutine *co;
5241     RwCo rwco = {
5242         .bs = bs,
5243         .ret = NOT_DONE,
5244     };
5245 
5246     if (qemu_in_coroutine()) {
5247         /* Fast-path if already in coroutine context */
5248         bdrv_flush_co_entry(&rwco);
5249     } else {
5250         AioContext *aio_context = bdrv_get_aio_context(bs);
5251 
5252         co = qemu_coroutine_create(bdrv_flush_co_entry);
5253         qemu_coroutine_enter(co, &rwco);
5254         while (rwco.ret == NOT_DONE) {
5255             aio_poll(aio_context, true);
5256         }
5257     }
5258 
5259     return rwco.ret;
5260 }
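
/* bdrv_flush() shows the standard shape of the synchronous wrappers in this
 * file (bdrv_discard() below follows the same pattern): in coroutine context
 * the entry function is called directly, otherwise a coroutine is spawned and
 * the BDS's AioContext is driven with aio_poll() until the NOT_DONE sentinel
 * has been replaced by the real return code.  A hypothetical new wrapper
 * would look like this sketch:
 *
 *     MyCo myco = { .bs = bs, .ret = NOT_DONE };
 *     if (qemu_in_coroutine()) {
 *         my_co_entry(&myco);
 *     } else {
 *         Coroutine *co = qemu_coroutine_create(my_co_entry);
 *         qemu_coroutine_enter(co, &myco);
 *         while (myco.ret == NOT_DONE) {
 *             aio_poll(bdrv_get_aio_context(bs), true);
 *         }
 *     }
 *
 * MyCo and my_co_entry are placeholders, not existing QEMU symbols. */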
5261 
5262 typedef struct DiscardCo {
5263     BlockDriverState *bs;
5264     int64_t sector_num;
5265     int nb_sectors;
5266     int ret;
5267 } DiscardCo;
5268 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5269 {
5270     DiscardCo *rwco = opaque;
5271 
5272     rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5273 }
5274 
5275 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5276                                  int nb_sectors)
5277 {
5278     int max_discard, ret;
5279 
5280     if (!bs->drv) {
5281         return -ENOMEDIUM;
5282     }
5283 
5284     ret = bdrv_check_request(bs, sector_num, nb_sectors);
5285     if (ret < 0) {
5286         return ret;
5287     } else if (bs->read_only) {
5288         return -EROFS;
5289     }
5290 
5291     bdrv_reset_dirty(bs, sector_num, nb_sectors);
5292 
5293     /* Do nothing if disabled.  */
5294     if (!(bs->open_flags & BDRV_O_UNMAP)) {
5295         return 0;
5296     }
5297 
5298     if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5299         return 0;
5300     }
5301 
5302     max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
5303     while (nb_sectors > 0) {
5304         int ret;
5305         int num = nb_sectors;
5306 
5307         /* align request */
5308         if (bs->bl.discard_alignment &&
5309             num >= bs->bl.discard_alignment &&
5310             sector_num % bs->bl.discard_alignment) {
5311             if (num > bs->bl.discard_alignment) {
5312                 num = bs->bl.discard_alignment;
5313             }
5314             num -= sector_num % bs->bl.discard_alignment;
5315         }
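
        /* Worked example (illustrative): with discard_alignment == 8,
         * sector_num == 5 and num >= 8, num is first capped to 8 and then
         * reduced by 5 % 8 == 5, so this iteration discards 3 sectors and
         * the next one starts at sector 8, on the discard boundary. */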
5316 
5317         /* limit request size */
5318         if (num > max_discard) {
5319             num = max_discard;
5320         }
5321 
5322         if (bs->drv->bdrv_co_discard) {
5323             ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5324         } else {
5325             BlockAIOCB *acb;
5326             CoroutineIOCompletion co = {
5327                 .coroutine = qemu_coroutine_self(),
5328             };
5329 
5330             acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
5331                                             bdrv_co_io_em_complete, &co);
5332             if (acb == NULL) {
5333                 return -EIO;
5334             } else {
5335                 qemu_coroutine_yield();
5336                 ret = co.ret;
5337             }
5338         }
5339         if (ret && ret != -ENOTSUP) {
5340             return ret;
5341         }
5342 
5343         sector_num += num;
5344         nb_sectors -= num;
5345     }
5346     return 0;
5347 }
5348 
5349 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5350 {
5351     Coroutine *co;
5352     DiscardCo rwco = {
5353         .bs = bs,
5354         .sector_num = sector_num,
5355         .nb_sectors = nb_sectors,
5356         .ret = NOT_DONE,
5357     };
5358 
5359     if (qemu_in_coroutine()) {
5360         /* Fast-path if already in coroutine context */
5361         bdrv_discard_co_entry(&rwco);
5362     } else {
5363         AioContext *aio_context = bdrv_get_aio_context(bs);
5364 
5365         co = qemu_coroutine_create(bdrv_discard_co_entry);
5366         qemu_coroutine_enter(co, &rwco);
5367         while (rwco.ret == NOT_DONE) {
5368             aio_poll(aio_context, true);
5369         }
5370     }
5371 
5372     return rwco.ret;
5373 }
5374 
5375 /**************************************************************/
5376 /* removable device support */
5377 
5378 /**
5379  * Return TRUE if the media is present
5380  */
5381 int bdrv_is_inserted(BlockDriverState *bs)
5382 {
5383     BlockDriver *drv = bs->drv;
5384 
5385     if (!drv)
5386         return 0;
5387     if (!drv->bdrv_is_inserted)
5388         return 1;
5389     return drv->bdrv_is_inserted(bs);
5390 }
5391 
5392 /**
5393  * Return whether the media changed since the last call to this
5394  * function, or -ENOTSUP if we don't know.  Most drivers don't know.
5395  */
5396 int bdrv_media_changed(BlockDriverState *bs)
5397 {
5398     BlockDriver *drv = bs->drv;
5399 
5400     if (drv && drv->bdrv_media_changed) {
5401         return drv->bdrv_media_changed(bs);
5402     }
5403     return -ENOTSUP;
5404 }
5405 
5406 /**
5407  * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5408  */
5409 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
5410 {
5411     BlockDriver *drv = bs->drv;
5412     const char *device_name;
5413 
5414     if (drv && drv->bdrv_eject) {
5415         drv->bdrv_eject(bs, eject_flag);
5416     }
5417 
5418     device_name = bdrv_get_device_name(bs);
5419     if (device_name[0] != '\0') {
5420         qapi_event_send_device_tray_moved(device_name,
5421                                           eject_flag, &error_abort);
5422     }
5423 }
5424 
5425 /**
5426  * Lock or unlock the media (if it is locked, the user won't be able
5427  * to eject it manually).
5428  */
5429 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
5430 {
5431     BlockDriver *drv = bs->drv;
5432 
5433     trace_bdrv_lock_medium(bs, locked);
5434 
5435     if (drv && drv->bdrv_lock_medium) {
5436         drv->bdrv_lock_medium(bs, locked);
5437     }
5438 }
5439 
5440 /* needed for generic scsi interface */
5441 
5442 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5443 {
5444     BlockDriver *drv = bs->drv;
5445 
5446     if (drv && drv->bdrv_ioctl)
5447         return drv->bdrv_ioctl(bs, req, buf);
5448     return -ENOTSUP;
5449 }
5450 
5451 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5452         unsigned long int req, void *buf,
5453         BlockCompletionFunc *cb, void *opaque)
5454 {
5455     BlockDriver *drv = bs->drv;
5456 
5457     if (drv && drv->bdrv_aio_ioctl)
5458         return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5459     return NULL;
5460 }
5461 
5462 void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5463 {
5464     bs->guest_block_size = align;
5465 }
5466 
5467 void *qemu_blockalign(BlockDriverState *bs, size_t size)
5468 {
5469     return qemu_memalign(bdrv_opt_mem_align(bs), size);
5470 }
5471 
5472 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
5473 {
5474     return memset(qemu_blockalign(bs, size), 0, size);
5475 }
5476 
5477 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
5478 {
5479     size_t align = bdrv_opt_mem_align(bs);
5480 
5481     /* Ensure that NULL is never returned on success */
5482     assert(align > 0);
5483     if (size == 0) {
5484         size = align;
5485     }
5486 
5487     return qemu_try_memalign(align, size);
5488 }
5489 
5490 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
5491 {
5492     void *mem = qemu_try_blockalign(bs, size);
5493 
5494     if (mem) {
5495         memset(mem, 0, size);
5496     }
5497 
5498     return mem;
5499 }
5500 
5501 /*
5502  * Check if all memory in this vector is aligned for bs (bdrv_opt_mem_align).
5503  */
5504 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5505 {
5506     int i;
5507     size_t alignment = bdrv_opt_mem_align(bs);
5508 
5509     for (i = 0; i < qiov->niov; i++) {
5510         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5511             return false;
5512         }
5513         if (qiov->iov[i].iov_len % alignment) {
5514             return false;
5515         }
5516     }
5517 
5518     return true;
5519 }
5520 
5521 BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs, const char *name)
5522 {
5523     BdrvDirtyBitmap *bm;
5524 
5525     assert(name);
5526     QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5527         if (bm->name && !strcmp(name, bm->name)) {
5528             return bm;
5529         }
5530     }
5531     return NULL;
5532 }
5533 
5534 void bdrv_dirty_bitmap_make_anon(BdrvDirtyBitmap *bitmap)
5535 {
5536     assert(!bdrv_dirty_bitmap_frozen(bitmap));
5537     g_free(bitmap->name);
5538     bitmap->name = NULL;
5539 }
5540 
5541 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs,
5542                                           uint32_t granularity,
5543                                           const char *name,
5544                                           Error **errp)
5545 {
5546     int64_t bitmap_size;
5547     BdrvDirtyBitmap *bitmap;
5548     uint32_t sector_granularity;
5549 
5550     assert((granularity & (granularity - 1)) == 0);
5551 
5552     if (name && bdrv_find_dirty_bitmap(bs, name)) {
5553         error_setg(errp, "Bitmap already exists: %s", name);
5554         return NULL;
5555     }
5556     sector_granularity = granularity >> BDRV_SECTOR_BITS;
5557     assert(sector_granularity);
5558     bitmap_size = bdrv_nb_sectors(bs);
5559     if (bitmap_size < 0) {
5560         error_setg_errno(errp, -bitmap_size, "Could not get length of device");
5561         errno = -bitmap_size;
5562         return NULL;
5563     }
5564     bitmap = g_new0(BdrvDirtyBitmap, 1);
5565     bitmap->bitmap = hbitmap_alloc(bitmap_size, ctz32(sector_granularity));
5566     bitmap->size = bitmap_size;
5567     bitmap->name = g_strdup(name);
5568     bitmap->disabled = false;
5569     QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5570     return bitmap;
5571 }
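
/* A minimal creation sketch, assuming the default granularity is wanted and
 * "bitmap0" is a caller-chosen name:
 *
 *     BdrvDirtyBitmap *bm;
 *     bm = bdrv_create_dirty_bitmap(bs,
 *              bdrv_get_default_bitmap_granularity(bs), "bitmap0", errp);
 *
 * The granularity must be a power of two no smaller than BDRV_SECTOR_SIZE,
 * since it is converted to a sector count above. */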
5572 
5573 bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap)
5574 {
5575     return bitmap->successor;
5576 }
5577 
5578 bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap)
5579 {
5580     return !(bitmap->disabled || bitmap->successor);
5581 }
5582 
5583 /**
5584  * Create a successor bitmap destined to replace this bitmap after an operation.
5585  * Requires that the bitmap is not frozen and has no successor.
5586  */
5587 int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs,
5588                                        BdrvDirtyBitmap *bitmap, Error **errp)
5589 {
5590     uint64_t granularity;
5591     BdrvDirtyBitmap *child;
5592 
5593     if (bdrv_dirty_bitmap_frozen(bitmap)) {
5594         error_setg(errp, "Cannot create a successor for a bitmap that is "
5595                    "currently frozen");
5596         return -1;
5597     }
5598     assert(!bitmap->successor);
5599 
5600     /* Create an anonymous successor */
5601     granularity = bdrv_dirty_bitmap_granularity(bitmap);
5602     child = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
5603     if (!child) {
5604         return -1;
5605     }
5606 
5607     /* Successor will be on or off based on our current state. */
5608     child->disabled = bitmap->disabled;
5609 
5610     /* Install the successor and freeze the parent */
5611     bitmap->successor = child;
5612     return 0;
5613 }
5614 
5615 /**
5616  * For a bitmap with a successor, yield our name to the successor,
5617  * delete the old bitmap, and return a handle to the new bitmap.
5618  */
5619 BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs,
5620                                             BdrvDirtyBitmap *bitmap,
5621                                             Error **errp)
5622 {
5623     char *name;
5624     BdrvDirtyBitmap *successor = bitmap->successor;
5625 
5626     if (successor == NULL) {
5627         error_setg(errp, "Cannot relinquish control if "
5628                    "there's no successor present");
5629         return NULL;
5630     }
5631 
5632     name = bitmap->name;
5633     bitmap->name = NULL;
5634     successor->name = name;
5635     bitmap->successor = NULL;
5636     bdrv_release_dirty_bitmap(bs, bitmap);
5637 
5638     return successor;
5639 }
5640 
5641 /**
5642  * In cases of failure where we can no longer safely delete the parent,
5643  * we may wish to re-join the parent and child/successor.
5644  * The merged parent will be un-frozen, but not explicitly re-enabled.
5645  */
5646 BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs,
5647                                            BdrvDirtyBitmap *parent,
5648                                            Error **errp)
5649 {
5650     BdrvDirtyBitmap *successor = parent->successor;
5651 
5652     if (!successor) {
5653         error_setg(errp, "Cannot reclaim a successor when none is present");
5654         return NULL;
5655     }
5656 
5657     if (!hbitmap_merge(parent->bitmap, successor->bitmap)) {
5658         error_setg(errp, "Merging of parent and successor bitmap failed");
5659         return NULL;
5660     }
5661     bdrv_release_dirty_bitmap(bs, successor);
5662     parent->successor = NULL;
5663 
5664     return parent;
5665 }
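
/* A minimal sketch of the freeze/abdicate/reclaim lifecycle for a bitmap bm
 * used by a hypothetical backup-style operation:
 *
 *     bdrv_dirty_bitmap_create_successor(bs, bm, errp);   // bm is now frozen
 *     ...run the operation; new writes are recorded in the successor...
 *     if (success) {
 *         bm = bdrv_dirty_bitmap_abdicate(bs, bm, errp);  // keep successor
 *     } else {
 *         bm = bdrv_reclaim_dirty_bitmap(bs, bm, errp);   // merge back
 *     }
 */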
5666 
5667 /**
5668  * Truncates _all_ bitmaps attached to a BDS.
5669  */
5670 static void bdrv_dirty_bitmap_truncate(BlockDriverState *bs)
5671 {
5672     BdrvDirtyBitmap *bitmap;
5673     uint64_t size = bdrv_nb_sectors(bs);
5674 
5675     QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5676         if (bdrv_dirty_bitmap_frozen(bitmap)) {
5677             continue;
5678         }
5679         hbitmap_truncate(bitmap->bitmap, size);
5680     }
5681 }
5682 
5683 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5684 {
5685     BdrvDirtyBitmap *bm, *next;
5686     QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5687         if (bm == bitmap) {
5688             assert(!bdrv_dirty_bitmap_frozen(bm));
5689             QLIST_REMOVE(bitmap, list);
5690             hbitmap_free(bitmap->bitmap);
5691             g_free(bitmap->name);
5692             g_free(bitmap);
5693             return;
5694         }
5695     }
5696 }
5697 
5698 void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap)
5699 {
5700     assert(!bdrv_dirty_bitmap_frozen(bitmap));
5701     bitmap->disabled = true;
5702 }
5703 
5704 void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap)
5705 {
5706     assert(!bdrv_dirty_bitmap_frozen(bitmap));
5707     bitmap->disabled = false;
5708 }
5709 
5710 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5711 {
5712     BdrvDirtyBitmap *bm;
5713     BlockDirtyInfoList *list = NULL;
5714     BlockDirtyInfoList **plist = &list;
5715 
5716     QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5717         BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1);
5718         BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1);
5719         info->count = bdrv_get_dirty_count(bm);
5720         info->granularity = bdrv_dirty_bitmap_granularity(bm);
5721         info->has_name = !!bm->name;
5722         info->name = g_strdup(bm->name);
5723         info->frozen = bdrv_dirty_bitmap_frozen(bm);
5724         entry->value = info;
5725         *plist = entry;
5726         plist = &entry->next;
5727     }
5728 
5729     return list;
5730 }
5731 
5732 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5733 {
5734     if (bitmap) {
5735         return hbitmap_get(bitmap->bitmap, sector);
5736     } else {
5737         return 0;
5738     }
5739 }
5740 
5741 /**
5742  * Chooses a default granularity based on the existing cluster size,
5743  * but clamped between [4K, 64K]. Defaults to 64K if no cluster size
5744  * information is available.
5745  */
5746 uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs)
5747 {
5748     BlockDriverInfo bdi;
5749     uint32_t granularity;
5750 
5751     if (bdrv_get_info(bs, &bdi) >= 0 && bdi.cluster_size > 0) {
5752         granularity = MAX(4096, bdi.cluster_size);
5753         granularity = MIN(65536, granularity);
5754     } else {
5755         granularity = 65536;
5756     }
5757 
5758     return granularity;
5759 }
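
/* For example: a 1 MiB cluster size is clamped down to a 64K granularity, a
 * 512-byte cluster size is clamped up to 4K, and an image without cluster
 * size information gets the 64K default. */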
5760 
5761 uint32_t bdrv_dirty_bitmap_granularity(BdrvDirtyBitmap *bitmap)
5762 {
5763     return BDRV_SECTOR_SIZE << hbitmap_granularity(bitmap->bitmap);
5764 }
5765 
5766 void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5767 {
5768     hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5769 }
5770 
5771 void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap,
5772                            int64_t cur_sector, int nr_sectors)
5773 {
5774     assert(bdrv_dirty_bitmap_enabled(bitmap));
5775     hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5776 }
5777 
5778 void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap,
5779                              int64_t cur_sector, int nr_sectors)
5780 {
5781     assert(bdrv_dirty_bitmap_enabled(bitmap));
5782     hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5783 }
5784 
5785 void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap)
5786 {
5787     assert(bdrv_dirty_bitmap_enabled(bitmap));
5788     hbitmap_reset(bitmap->bitmap, 0, bitmap->size);
5789 }
5790 
5791 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5792                     int nr_sectors)
5793 {
5794     BdrvDirtyBitmap *bitmap;
5795     QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5796         if (!bdrv_dirty_bitmap_enabled(bitmap)) {
5797             continue;
5798         }
5799         hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5800     }
5801 }
5802 
5803 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
5804                       int nr_sectors)
5805 {
5806     BdrvDirtyBitmap *bitmap;
5807     QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5808         if (!bdrv_dirty_bitmap_enabled(bitmap)) {
5809             continue;
5810         }
5811         hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5812     }
5813 }
5814 
5815 /**
5816  * Advance an HBitmapIter to an arbitrary offset.
5817  */
5818 void bdrv_set_dirty_iter(HBitmapIter *hbi, int64_t offset)
5819 {
5820     assert(hbi->hb);
5821     hbitmap_iter_init(hbi, hbi->hb, offset);
5822 }
5823 
5824 int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap)
5825 {
5826     return hbitmap_count(bitmap->bitmap);
5827 }
5828 
5829 /* Get a reference to bs */
5830 void bdrv_ref(BlockDriverState *bs)
5831 {
5832     bs->refcnt++;
5833 }
5834 
5835 /* Release a previously grabbed reference to bs.
5836  * If, after releasing, the reference count drops to zero, the
5837  * BlockDriverState is deleted. */
5838 void bdrv_unref(BlockDriverState *bs)
5839 {
5840     if (!bs) {
5841         return;
5842     }
5843     assert(bs->refcnt > 0);
5844     if (--bs->refcnt == 0) {
5845         bdrv_delete(bs);
5846     }
5847 }
5848 
5849 struct BdrvOpBlocker {
5850     Error *reason;
5851     QLIST_ENTRY(BdrvOpBlocker) list;
5852 };
5853 
5854 bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5855 {
5856     BdrvOpBlocker *blocker;
5857     assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5858     if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5859         blocker = QLIST_FIRST(&bs->op_blockers[op]);
5860         if (errp) {
5861             error_setg(errp, "Node '%s' is busy: %s",
5862                        bdrv_get_device_or_node_name(bs),
5863                        error_get_pretty(blocker->reason));
5864         }
5865         return true;
5866     }
5867     return false;
5868 }
5869 
5870 void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5871 {
5872     BdrvOpBlocker *blocker;
5873     assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5874 
5875     blocker = g_new0(BdrvOpBlocker, 1);
5876     blocker->reason = reason;
5877     QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5878 }
5879 
5880 void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5881 {
5882     BdrvOpBlocker *blocker, *next;
5883     assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5884     QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5885         if (blocker->reason == reason) {
5886             QLIST_REMOVE(blocker, list);
5887             g_free(blocker);
5888         }
5889     }
5890 }
5891 
5892 void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5893 {
5894     int i;
5895     for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5896         bdrv_op_block(bs, i, reason);
5897     }
5898 }
5899 
5900 void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5901 {
5902     int i;
5903     for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5904         bdrv_op_unblock(bs, i, reason);
5905     }
5906 }
5907 
5908 bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5909 {
5910     int i;
5911 
5912     for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5913         if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5914             return false;
5915         }
5916     }
5917     return true;
5918 }
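
/* A minimal usage sketch, assuming a hypothetical job that must prevent the
 * medium from being ejected while it runs:
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "block device is in use by my-job");
 *     bdrv_op_block(bs, BLOCK_OP_TYPE_EJECT, blocker);
 *     ...work; bdrv_op_is_blocked() now reports this reason to others...
 *     bdrv_op_unblock(bs, BLOCK_OP_TYPE_EJECT, blocker);
 *     error_free(blocker);
 *
 * The Error pointer doubles as the key for unblocking, so the caller must
 * keep it alive until the corresponding bdrv_op_unblock() call. */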
5919 
5920 void bdrv_iostatus_enable(BlockDriverState *bs)
5921 {
5922     bs->iostatus_enabled = true;
5923     bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5924 }
5925 
5926 /* The I/O status is only enabled if the drive explicitly
5927  * enables it _and_ the VM is configured to stop on errors */
5928 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5929 {
5930     return (bs->iostatus_enabled &&
5931            (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5932             bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
5933             bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5934 }
5935 
5936 void bdrv_iostatus_disable(BlockDriverState *bs)
5937 {
5938     bs->iostatus_enabled = false;
5939 }
5940 
5941 void bdrv_iostatus_reset(BlockDriverState *bs)
5942 {
5943     if (bdrv_iostatus_is_enabled(bs)) {
5944         bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5945         if (bs->job) {
5946             block_job_iostatus_reset(bs->job);
5947         }
5948     }
5949 }
5950 
5951 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5952 {
5953     assert(bdrv_iostatus_is_enabled(bs));
5954     if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5955         bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5956                                          BLOCK_DEVICE_IO_STATUS_FAILED;
5957     }
5958 }
5959 
5960 void bdrv_img_create(const char *filename, const char *fmt,
5961                      const char *base_filename, const char *base_fmt,
5962                      char *options, uint64_t img_size, int flags,
5963                      Error **errp, bool quiet)
5964 {
5965     QemuOptsList *create_opts = NULL;
5966     QemuOpts *opts = NULL;
5967     const char *backing_fmt, *backing_file;
5968     int64_t size;
5969     BlockDriver *drv, *proto_drv;
5970     BlockDriver *backing_drv = NULL;
5971     Error *local_err = NULL;
5972     int ret = 0;
5973 
5974     /* Find driver and parse its options */
5975     drv = bdrv_find_format(fmt);
5976     if (!drv) {
5977         error_setg(errp, "Unknown file format '%s'", fmt);
5978         return;
5979     }
5980 
5981     proto_drv = bdrv_find_protocol(filename, true, errp);
5982     if (!proto_drv) {
5983         return;
5984     }
5985 
5986     if (!drv->create_opts) {
5987         error_setg(errp, "Format driver '%s' does not support image creation",
5988                    drv->format_name);
5989         return;
5990     }
5991 
5992     if (!proto_drv->create_opts) {
5993         error_setg(errp, "Protocol driver '%s' does not support image creation",
5994                    proto_drv->format_name);
5995         return;
5996     }
5997 
5998     create_opts = qemu_opts_append(create_opts, drv->create_opts);
5999     create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
6000 
6001     /* Create parameter list with default values */
6002     opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
6003     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size, &error_abort);
6004 
6005     /* Parse -o options */
6006     if (options) {
6007         qemu_opts_do_parse(opts, options, NULL, &local_err);
6008         if (local_err) {
6009             error_report_err(local_err);
6010             local_err = NULL;
6011             error_setg(errp, "Invalid options for file format '%s'", fmt);
6012             goto out;
6013         }
6014     }
6015 
6016     if (base_filename) {
6017         qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename, &local_err);
6018         if (local_err) {
6019             error_setg(errp, "Backing file not supported for file format '%s'",
6020                        fmt);
6021             goto out;
6022         }
6023     }
6024 
6025     if (base_fmt) {
6026         qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt, &local_err);
6027         if (local_err) {
6028             error_setg(errp, "Backing file format not supported for file "
6029                              "format '%s'", fmt);
6030             goto out;
6031         }
6032     }
6033 
6034     backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
6035     if (backing_file) {
6036         if (!strcmp(filename, backing_file)) {
6037             error_setg(errp, "Trying to create an image with the "
6038                              "same filename as the backing file");
6039             goto out;
6040         }
6041     }
6042 
6043     backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
6044     if (backing_fmt) {
6045         backing_drv = bdrv_find_format(backing_fmt);
6046         if (!backing_drv) {
6047             error_setg(errp, "Unknown backing file format '%s'",
6048                        backing_fmt);
6049             goto out;
6050         }
6051     }
6052 
6053     /* The size for the image must always be specified, with one exception:
6054      * if we are using a backing file, we can obtain the size from there */
6055     size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
6056     if (size == -1) {
6057         if (backing_file) {
6058             BlockDriverState *bs;
6059             char *full_backing = g_new0(char, PATH_MAX);
6060             int64_t size;
6061             int back_flags;
6062 
6063             bdrv_get_full_backing_filename_from_filename(filename, backing_file,
6064                                                          full_backing, PATH_MAX,
6065                                                          &local_err);
6066             if (local_err) {
6067                 g_free(full_backing);
6068                 goto out;
6069             }
6070 
6071             /* backing files always opened read-only */
6072             back_flags =
6073                 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
6074 
6075             bs = NULL;
6076             ret = bdrv_open(&bs, full_backing, NULL, NULL, back_flags,
6077                             backing_drv, &local_err);
6078             g_free(full_backing);
6079             if (ret < 0) {
6080                 goto out;
6081             }
6082             size = bdrv_getlength(bs);
6083             if (size < 0) {
6084                 error_setg_errno(errp, -size, "Could not get size of '%s'",
6085                                  backing_file);
6086                 bdrv_unref(bs);
6087                 goto out;
6088             }
6089 
6090             qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size, &error_abort);
6091 
6092             bdrv_unref(bs);
6093         } else {
6094             error_setg(errp, "Image creation needs a size parameter");
6095             goto out;
6096         }
6097     }
6098 
6099     if (!quiet) {
6100         printf("Formatting '%s', fmt=%s", filename, fmt);
6101         qemu_opts_print(opts, " ");
6102         puts("");
6103     }
6104 
6105     ret = bdrv_create(drv, filename, opts, &local_err);
6106 
6107     if (ret == -EFBIG) {
6108         /* This is generally a better message than whatever the driver would
6109          * deliver (especially because of the cluster_size_hint), since that
6110          * is most probably not much different from "image too large". */
6111         const char *cluster_size_hint = "";
6112         if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
6113             cluster_size_hint = " (try using a larger cluster size)";
6114         }
6115         error_setg(errp, "The image size is too large for file format '%s'"
6116                    "%s", fmt, cluster_size_hint);
6117         error_free(local_err);
6118         local_err = NULL;
6119     }
6120 
6121 out:
6122     qemu_opts_del(opts);
6123     qemu_opts_free(create_opts);
6124     if (local_err) {
6125         error_propagate(errp, local_err);
6126     }
6127 }
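
/* An illustrative invocation, roughly what qemu-img create does for a qcow2
 * overlay with a backing file (placeholder filenames, error handling
 * elided):
 *
 *     Error *err = NULL;
 *     bdrv_img_create("overlay.qcow2", "qcow2", "base.qcow2", NULL,
 *                     NULL, (uint64_t)-1, 0, &err, false);
 *
 * With a backing file given, the size may be left unspecified ((uint64_t)-1)
 * and is then taken from the backing image as implemented above. */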
6128 
6129 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
6130 {
6131     return bs->aio_context;
6132 }
6133 
6134 void bdrv_detach_aio_context(BlockDriverState *bs)
6135 {
6136     BdrvAioNotifier *baf;
6137 
6138     if (!bs->drv) {
6139         return;
6140     }
6141 
6142     QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
6143         baf->detach_aio_context(baf->opaque);
6144     }
6145 
6146     if (bs->io_limits_enabled) {
6147         throttle_detach_aio_context(&bs->throttle_state);
6148     }
6149     if (bs->drv->bdrv_detach_aio_context) {
6150         bs->drv->bdrv_detach_aio_context(bs);
6151     }
6152     if (bs->file) {
6153         bdrv_detach_aio_context(bs->file);
6154     }
6155     if (bs->backing_hd) {
6156         bdrv_detach_aio_context(bs->backing_hd);
6157     }
6158 
6159     bs->aio_context = NULL;
6160 }
6161 
6162 void bdrv_attach_aio_context(BlockDriverState *bs,
6163                              AioContext *new_context)
6164 {
6165     BdrvAioNotifier *ban;
6166 
6167     if (!bs->drv) {
6168         return;
6169     }
6170 
6171     bs->aio_context = new_context;
6172 
6173     if (bs->backing_hd) {
6174         bdrv_attach_aio_context(bs->backing_hd, new_context);
6175     }
6176     if (bs->file) {
6177         bdrv_attach_aio_context(bs->file, new_context);
6178     }
6179     if (bs->drv->bdrv_attach_aio_context) {
6180         bs->drv->bdrv_attach_aio_context(bs, new_context);
6181     }
6182     if (bs->io_limits_enabled) {
6183         throttle_attach_aio_context(&bs->throttle_state, new_context);
6184     }
6185 
6186     QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
6187         ban->attached_aio_context(new_context, ban->opaque);
6188     }
6189 }
6190 
6191 void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
6192 {
6193     bdrv_drain_all(); /* ensure there are no in-flight requests */
6194 
6195     bdrv_detach_aio_context(bs);
6196 
6197     /* This function executes in the old AioContext so acquire the new one in
6198      * case it runs in a different thread.
6199      */
6200     aio_context_acquire(new_context);
6201     bdrv_attach_aio_context(bs, new_context);
6202     aio_context_release(new_context);
6203 }
6204 
6205 void bdrv_add_aio_context_notifier(BlockDriverState *bs,
6206         void (*attached_aio_context)(AioContext *new_context, void *opaque),
6207         void (*detach_aio_context)(void *opaque), void *opaque)
6208 {
6209     BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1);
6210     *ban = (BdrvAioNotifier){
6211         .attached_aio_context = attached_aio_context,
6212         .detach_aio_context   = detach_aio_context,
6213         .opaque               = opaque
6214     };
6215 
6216     QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list);
6217 }
6218 
6219 void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
6220                                       void (*attached_aio_context)(AioContext *,
6221                                                                    void *),
6222                                       void (*detach_aio_context)(void *),
6223                                       void *opaque)
6224 {
6225     BdrvAioNotifier *ban, *ban_next;
6226 
6227     QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
6228         if (ban->attached_aio_context == attached_aio_context &&
6229             ban->detach_aio_context   == detach_aio_context   &&
6230             ban->opaque               == opaque)
6231         {
6232             QLIST_REMOVE(ban, list);
6233             g_free(ban);
6234 
6235             return;
6236         }
6237     }
6238 
6239     abort();
6240 }
6241 
6242 void bdrv_add_before_write_notifier(BlockDriverState *bs,
6243                                     NotifierWithReturn *notifier)
6244 {
6245     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
6246 }
6247 
6248 int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
6249                        BlockDriverAmendStatusCB *status_cb)
6250 {
6251     if (!bs->drv->bdrv_amend_options) {
6252         return -ENOTSUP;
6253     }
6254     return bs->drv->bdrv_amend_options(bs, opts, status_cb);
6255 }
6256 
6257 /* This function will be called by the bdrv_recurse_is_first_non_filter method
6258  * of block filters and by bdrv_is_first_non_filter.
6259  * It is used to test whether the given bs is the candidate, or to recurse
6260  * further into the node graph.
6261  */
6262 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
6263                                       BlockDriverState *candidate)
6264 {
6265     /* return false if basic checks fail */
6266     if (!bs || !bs->drv) {
6267         return false;
6268     }
6269 
6270     /* the code has reached a non-filter block driver -> check if this bs is
6271      * the same as the candidate. This is the recursion's termination condition.
6272      */
6273     if (!bs->drv->is_filter) {
6274         return bs == candidate;
6275     }
6276     /* Down this path the driver is a block filter driver */
6277 
6278     /* If the block filter recursion method is defined use it to recurse down
6279      * the node graph.
6280      */
6281     if (bs->drv->bdrv_recurse_is_first_non_filter) {
6282         return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
6283     }
6284 
6285     /* the driver is a block filter but does not allow recursion -> return false
6286      */
6287     return false;
6288 }
6289 
6290 /* This function checks whether the candidate is the first non-filter bs down
6291  * its bs chain. Since we don't have pointers to parents, it explores all bs
6292  * chains from the top. Some filters can choose not to pass down the recursion.
6293  */
6294 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
6295 {
6296     BlockDriverState *bs;
6297 
6298     /* walk down the bs forest recursively */
6299     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
6300         bool perm;
6301 
6302         /* try to recurse in this top level bs */
6303         perm = bdrv_recurse_is_first_non_filter(bs, candidate);
6304 
6305         /* candidate is the first non filter */
6306         if (perm) {
6307             return true;
6308         }
6309     }
6310 
6311     return false;
6312 }
6313 
6314 BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
6315 {
6316     BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
6317     AioContext *aio_context;
6318 
6319     if (!to_replace_bs) {
6320         error_setg(errp, "Node name '%s' not found", node_name);
6321         return NULL;
6322     }
6323 
6324     aio_context = bdrv_get_aio_context(to_replace_bs);
6325     aio_context_acquire(aio_context);
6326 
6327     if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
6328         to_replace_bs = NULL;
6329         goto out;
6330     }
6331 
6332     /* We don't want an arbitrary node of the BDS chain to be replaced, only the
6333      * topmost non-filter, in order to prevent data corruption.
6334      * Another benefit is that this test excludes backing files, which are
6335      * blocked by the backing blockers.
6336      */
6337     if (!bdrv_is_first_non_filter(to_replace_bs)) {
6338         error_setg(errp, "Only top most non filter can be replaced");
6339         to_replace_bs = NULL;
6340         goto out;
6341     }
6342 
6343 out:
6344     aio_context_release(aio_context);
6345     return to_replace_bs;
6346 }
6347 
6348 void bdrv_io_plug(BlockDriverState *bs)
6349 {
6350     BlockDriver *drv = bs->drv;
6351     if (drv && drv->bdrv_io_plug) {
6352         drv->bdrv_io_plug(bs);
6353     } else if (bs->file) {
6354         bdrv_io_plug(bs->file);
6355     }
6356 }
6357 
6358 void bdrv_io_unplug(BlockDriverState *bs)
6359 {
6360     BlockDriver *drv = bs->drv;
6361     if (drv && drv->bdrv_io_unplug) {
6362         drv->bdrv_io_unplug(bs);
6363     } else if (bs->file) {
6364         bdrv_io_unplug(bs->file);
6365     }
6366 }
6367 
6368 void bdrv_flush_io_queue(BlockDriverState *bs)
6369 {
6370     BlockDriver *drv = bs->drv;
6371     if (drv && drv->bdrv_flush_io_queue) {
6372         drv->bdrv_flush_io_queue(bs);
6373     } else if (bs->file) {
6374         bdrv_flush_io_queue(bs->file);
6375     }
6376 }
6377 
6378 static bool append_open_options(QDict *d, BlockDriverState *bs)
6379 {
6380     const QDictEntry *entry;
6381     bool found_any = false;
6382 
6383     for (entry = qdict_first(bs->options); entry;
6384          entry = qdict_next(bs->options, entry))
6385     {
6386         /* Only take options for this level and exclude all non-driver-specific
6387          * options */
6388         if (!strchr(qdict_entry_key(entry), '.') &&
6389             strcmp(qdict_entry_key(entry), "node-name"))
6390         {
6391             qobject_incref(qdict_entry_value(entry));
6392             qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry));
6393             found_any = true;
6394         }
6395     }
6396 
6397     return found_any;
6398 }
6399 
6400 /* Updates the following BDS fields:
6401  *  - exact_filename: A filename which may be used for opening a block device
6402  *                    which (mostly) equals the given BDS (even without any
6403  *                    other options; so reading and writing must return the same
6404  *                    results, but caching etc. may be different)
6405  *  - full_open_options: Options which, when given when opening a block device
6406  *                       (without a filename), result in a BDS (mostly)
6407  *                       equalling the given one
6408  *  - filename: If exact_filename is set, it is copied here. Otherwise,
6409  *              full_open_options is converted to a JSON object, prefixed with
6410  *              "json:" (for use through the JSON pseudo protocol) and put here.
6411  */
6412 void bdrv_refresh_filename(BlockDriverState *bs)
6413 {
6414     BlockDriver *drv = bs->drv;
6415     QDict *opts;
6416 
6417     if (!drv) {
6418         return;
6419     }
6420 
6421     /* This BDS's file name will most probably depend on its file's name, so
6422      * refresh that first */
6423     if (bs->file) {
6424         bdrv_refresh_filename(bs->file);
6425     }
6426 
6427     if (drv->bdrv_refresh_filename) {
6428         /* Obsolete information is of no use here, so drop the old file name
6429          * information before refreshing it */
6430         bs->exact_filename[0] = '\0';
6431         if (bs->full_open_options) {
6432             QDECREF(bs->full_open_options);
6433             bs->full_open_options = NULL;
6434         }
6435 
6436         drv->bdrv_refresh_filename(bs);
6437     } else if (bs->file) {
6438         /* Try to reconstruct valid information from the underlying file */
6439         bool has_open_options;
6440 
6441         bs->exact_filename[0] = '\0';
6442         if (bs->full_open_options) {
6443             QDECREF(bs->full_open_options);
6444             bs->full_open_options = NULL;
6445         }
6446 
6447         opts = qdict_new();
6448         has_open_options = append_open_options(opts, bs);
6449 
6450         /* If no specific options have been given for this BDS, the filename of
6451          * the underlying file should suffice for this one as well */
6452         if (bs->file->exact_filename[0] && !has_open_options) {
6453             strcpy(bs->exact_filename, bs->file->exact_filename);
6454         }
6455         /* Reconstructing the full options QDict is simple for most format block
6456          * drivers, as long as the full options are known for the underlying
6457          * file BDS. The full options QDict of that file BDS should somehow
6458          * contain a representation of the filename, therefore the following
6459          * suffices without querying the (exact_)filename of this BDS. */
6460         if (bs->file->full_open_options) {
6461             qdict_put_obj(opts, "driver",
6462                           QOBJECT(qstring_from_str(drv->format_name)));
6463             QINCREF(bs->file->full_open_options);
6464             qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options));
6465 
6466             bs->full_open_options = opts;
6467         } else {
6468             QDECREF(opts);
6469         }
6470     } else if (!bs->full_open_options && qdict_size(bs->options)) {
6471         /* There is no underlying file BDS (at least referenced by BDS.file),
6472          * so the full options QDict should be equal to the options given
6473          * specifically for this block device when it was opened (plus the
6474          * driver specification).
6475          * Because those options don't change, there is no need to update
6476          * full_open_options when it's already set. */
6477 
6478         opts = qdict_new();
6479         append_open_options(opts, bs);
6480         qdict_put_obj(opts, "driver",
6481                       QOBJECT(qstring_from_str(drv->format_name)));
6482 
6483         if (bs->exact_filename[0]) {
6484             /* This may not work for all block protocol drivers (some may
6485              * require this filename to be parsed), but we have to find some
6486              * default solution here, so just include it. If some block driver
6487              * does not support pure options without any filename at all or
6488              * needs some special format of the options QDict, it needs to
6489              * implement the driver-specific bdrv_refresh_filename() function.
6490              */
6491             qdict_put_obj(opts, "filename",
6492                           QOBJECT(qstring_from_str(bs->exact_filename)));
6493         }
6494 
6495         bs->full_open_options = opts;
6496     }
6497 
6498     if (bs->exact_filename[0]) {
6499         pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename);
6500     } else if (bs->full_open_options) {
6501         QString *json = qobject_to_json(QOBJECT(bs->full_open_options));
6502         snprintf(bs->filename, sizeof(bs->filename), "json:%s",
6503                  qstring_get_str(json));
6504         QDECREF(json);
6505     }
6506 }
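
/* For example, a qcow2 image on a regular file that cannot be described by a
 * plain path ends up with a pseudo-protocol filename along the lines of
 *
 *     json:{"driver": "qcow2", "file": {"driver": "file",
 *                                       "filename": "/tmp/test.qcow2"}}
 *
 * (illustrative; the exact key order depends on QDict iteration). */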
6507 
6508 /* The purpose of this accessor function is to allow the device models to
6509  * access the BlockAcctStats structure embedded inside a BlockDriverState
6510  * without being aware of the BlockDriverState structure layout.
6511  * It will go away once the BlockAcctStats structure is moved inside
6512  * the device models.
6513  */
6514 BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
6515 {
6516     return &bs->stats;
6517 }
6518