/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits() if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

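/*
 * A minimal usage sketch (hypothetical caller): enable throttling before
 * applying limits, as the comment above bdrv_io_limits_enable() requires.
 * The ThrottleConfig field names follow util/throttle.h of this tree; the
 * 10 MB/s and 100 IOPS figures are arbitrary example values.
 */
static void example_enable_io_limits(BlockDriverState *bs)
{
    ThrottleConfig cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024; /* bytes/s */
    cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;              /* ops/s */

    bdrv_io_limits_enable(bs);    /* init throttle state and timers first */
    bdrv_set_io_limits(bs, &cfg); /* then apply the configuration */
}
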
/* This function makes an I/O request wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already queued,
     * queue this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

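/*
 * A minimal sketch of how callers use the alignment hint: buffers for
 * O_DIRECT-capable I/O are allocated with qemu_memalign(). The 64 KiB
 * size is an arbitrary example value.
 */
static void example_aligned_io_buffer(BlockDriverState *bs)
{
    void *buf = qemu_memalign(bdrv_opt_mem_align(bs), 64 * 1024);

    /* ... fill buf and submit read/write requests with it ... */

    qemu_vfree(buf);
}
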
/* check if the path starts with "<protocol>:" */
int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

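/*
 * A few illustrative cases (hypothetical paths): a "<protocol>:" prefix
 * only counts when no '/' (or '\\' on Windows) appears before the colon.
 */
static void example_path_has_protocol(void)
{
    assert(path_has_protocol("nbd:unix:/tmp/nbd.sock"));
    assert(path_has_protocol("http://example.com/image.qcow2"));
    assert(!path_has_protocol("/var/lib/images/disk.qcow2"));
    assert(!path_has_protocol("relative/dir/disk.qcow2"));
}
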
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it, treating it as relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

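/*
 * A minimal sketch of path_combine() semantics; the image names are
 * hypothetical.
 */
static void example_path_combine(void)
{
    char dest[PATH_MAX];

    /* relative: resolved against the directory of base_path */
    path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "base.qcow2");
    /* dest == "/images/base.qcow2" */

    /* absolute: copied unchanged */
    path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "/mnt/base.raw");
    /* dest == "/mnt/base.raw" */
}
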
void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp)
{
    if (backing[0] == '\0' || path_has_protocol(backing) ||
        path_is_absolute(backing))
    {
        pstrcpy(dest, sz, backing);
    } else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
        error_setg(errp, "Cannot use relative backing file names for '%s'",
                   backed);
    } else {
        path_combine(dest, sz, backed, backing);
    }
}

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
                                    Error **errp)
{
    char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;

    bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
                                                 dest, sz, errp);
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

BlockDriverState *bdrv_new_root(void)
{
    BlockDriverState *bs = bdrv_new();

    QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    return bs;
}

BlockDriverState *bdrv_new(void)
{
    BlockDriverState *bs;
    int i;

    bs = g_new0(BlockDriverState, 1);
    QLIST_INIT(&bs->dirty_bitmaps);
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}

int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

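/*
 * A minimal creation sketch mirroring the qcow2 pattern used by
 * bdrv_append_temp_snapshot() below; the path and 1 GiB size are
 * hypothetical, and bdrv_file.create_opts is assumed to describe the
 * protocol driver's creation options.
 */
static int example_create_raw_file(Error **errp)
{
    QemuOpts *opts;
    int ret;

    opts = qemu_opts_create(bdrv_file.create_opts, NULL, 0, &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, 1024 * 1024 * 1024);
    ret = bdrv_create_file("/tmp/disk.img", opts, errp);
    qemu_opts_del(opts);
    return ret;
}
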
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}

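/*
 * A minimal usage sketch, following the same error convention as the
 * caller in bdrv_append_temp_snapshot() below.
 */
static int example_tmp_image_path(char *buf, int len, Error **errp)
{
    int ret = get_tmp_filename(buf, len);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        return ret;
    }
    /* buf now names an empty file such as "/var/tmp/vl.XXXXXX" (expanded) */
    return 0;
}
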
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return &bdrv_file;
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

/*
 * Guess image format by probing its contents.
 * This is not a good idea when your image is raw (CVE-2008-2004), but
 * we do it anyway for backward compatibility.
 *
 * @buf         contains the image's first @buf_size bytes.
 * @buf_size    is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
 *              but can be smaller if the image file is smaller)
 * @filename    is its filename.
 *
 * For all block drivers, call the bdrv_probe() method to get its
 * probing score.
 * Return the first block driver with the highest probing score.
 */
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe) {
            score = d->bdrv_probe(buf, buf_size, filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    BlockDriver *drv;
    uint8_t buf[BLOCK_PROBE_BUF_SIZE];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        *pdrv = &bdrv_raw;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    drv = bdrv_probe_all(buf, ret, filename);
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 * Return 0 on success, -errno on error.
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

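/*
 * A minimal sketch of how the two parsers above compose when translating
 * -drive style cache/discard strings into open flags; the mode strings
 * here are arbitrary examples.
 */
static int example_parse_drive_modes(int *flags)
{
    /* "none" maps to BDRV_O_NOCACHE | BDRV_O_CACHE_WB */
    if (bdrv_parse_cache_flags("none", flags) != 0) {
        return -1;
    }
    /* "unmap" maps to BDRV_O_UNMAP */
    if (bdrv_parse_discard_flags("unmap", flags) != 0) {
        return -1;
    }
    return 0;
}
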
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files are always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* Check for empty string or invalid characters */
    if (!id_wellformed(node_name)) {
        error_setg(errp, "Invalid node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (blk_by_name(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() was called directly with a protocol driver as drv. That
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);
    bs->growable = !!(flags & BDRV_O_PROTOCOL);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename);

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}

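/*
 * A minimal sketch of the json: pseudo-protocol handled above: options
 * embedded in the filename end up in the (flattened) options QDict. The
 * image path is hypothetical.
 */
static int example_open_json_filename(BlockDriverState **pbs, Error **errp)
{
    return bdrv_open(pbs,
                     "json:{\"driver\": \"qcow2\","
                     " \"file.filename\": \"/tmp/disk.qcow2\"}",
                     NULL, NULL, BDRV_O_RDWR, NULL, errp);
}
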
/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                             "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}

void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{
    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bdrv_get_device_name(bs));
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
                                       &local_err);
        if (local_err) {
            ret = -EINVAL;
            error_propagate(errp, local_err);
            QDECREF(options);
            goto free_exit;
        }
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new();

    if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
        qdict_put(options, "driver", qstring_from_str(bs->backing_format));
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), NULL, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}

/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}

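/*
 * A minimal sketch of opening a child node from a flattened options
 * QDict, in the style of the "file" child opened by bdrv_open() below;
 * the flag combination here is an illustrative choice.
 */
static int example_open_file_child(QDict *options, Error **errp)
{
    BlockDriverState *file = NULL;
    int ret;

    /* consumes the "file" reference or "file.*" entries from @options */
    ret = bdrv_open_image(&file, NULL, options, "file",
                          BDRV_O_PROTOCOL | BDRV_O_CACHE_WB, false, errp);
    if (ret < 0) {
        return ret;
    }
    /* ... use file, then drop the reference ... */
    bdrv_unref(file);
    return ret;
}
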
int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err = NULL;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new();

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, &bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
    return ret;
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new();
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    bs->probed = !drv;
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    bdrv_refresh_filename(bs);

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bdrv_get_device_name(bs), entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        if (bs->blk) {
            blk_dev_change_media_cb(bs->blk, true);
        }
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}

1669 /*
1670  * Reopen multiple BlockDriverStates atomically & transactionally.
1671  *
1672  * The queue passed in (bs_queue) must have been built up previous
1673  * via bdrv_reopen_queue().
1674  *
1675  * Reopens all BDS specified in the queue, with the appropriate
1676  * flags.  All devices are prepared for reopen, and failure of any
1677  * device will cause all device changes to be abandonded, and intermediate
1678  * data cleaned up.
1679  *
1680  * If all devices prepare successfully, then the changes are committed
1681  * to all devices.
1682  *
1683  */
1684 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1685 {
1686     int ret = -1;
1687     BlockReopenQueueEntry *bs_entry, *next;
1688     Error *local_err = NULL;
1689 
1690     assert(bs_queue != NULL);
1691 
1692     bdrv_drain_all();
1693 
1694     QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1695         if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1696             error_propagate(errp, local_err);
1697             goto cleanup;
1698         }
1699         bs_entry->prepared = true;
1700     }
1701 
1702     /* If we reach this point, we have success and just need to apply the
1703      * changes
1704      */
1705     QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1706         bdrv_reopen_commit(&bs_entry->state);
1707     }
1708 
1709     ret = 0;
1710 
1711 cleanup:
1712     QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1713         if (ret && bs_entry->prepared) {
1714             bdrv_reopen_abort(&bs_entry->state);
1715         }
1716         g_free(bs_entry);
1717     }
1718     g_free(bs_queue);
1719     return ret;
1720 }
1721 
1722 
1723 /* Reopen a single BlockDriverState with the specified flags. */
1724 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1725 {
1726     int ret = -1;
1727     Error *local_err = NULL;
1728     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1729 
1730     ret = bdrv_reopen_multiple(queue, &local_err);
1731     if (local_err != NULL) {
1732         error_propagate(errp, local_err);
1733     }
1734     return ret;
1735 }
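
/*
 * Illustrative sketch (assumption, mirroring what bdrv_commit() does
 * further down): temporarily reopening a read-only image read/write and
 * restoring the original flags afterwards.
 */
static int example_with_rw_access(BlockDriverState *bs)
{
    int open_flags = bs->open_flags;
    int ret;

    ret = bdrv_reopen(bs, open_flags | BDRV_O_RDWR, NULL);
    if (ret < 0) {
        return ret;
    }
    /* ... perform the writes that required r/w access ... */
    return bdrv_reopen(bs, open_flags & ~BDRV_O_RDWR, NULL);
}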
1736 
1737 
1738 /*
1739  * Prepares a BlockDriverState for reopen. All changes are staged in the
1740  * 'opaque' field of the BDRVReopenState, which is used and allocated by
1741  * the block driver layer's .bdrv_reopen_prepare().
1742  *
1743  * bs is the BlockDriverState to reopen
1744  * flags are the new open flags
1745  * queue is the reopen queue
1746  *
1747  * Returns 0 on success, non-zero on error.  On error errp will be set
1748  * as well.
1749  *
1750  * On failure, bdrv_reopen_abort() will be called to clean up any data.
1751  * It is the responsibility of the caller to then call abort() or
1752  * commit() for any other BDS that have been left in the prepare() state.
1753  *
1754  */
1755 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1756                         Error **errp)
1757 {
1758     int ret = -1;
1759     Error *local_err = NULL;
1760     BlockDriver *drv;
1761 
1762     assert(reopen_state != NULL);
1763     assert(reopen_state->bs->drv != NULL);
1764     drv = reopen_state->bs->drv;
1765 
1766     /* if we are to stay read-only, do not allow permission change
1767      * to r/w */
1768     if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1769         reopen_state->flags & BDRV_O_RDWR) {
1770         error_set(errp, QERR_DEVICE_IS_READ_ONLY,
1771                   bdrv_get_device_name(reopen_state->bs));
1772         goto error;
1773     }
1774 
1775 
1776     ret = bdrv_flush(reopen_state->bs);
1777     if (ret) {
1778         error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1779                   strerror(-ret));
1780         goto error;
1781     }
1782 
1783     if (drv->bdrv_reopen_prepare) {
1784         ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1785         if (ret) {
1786             if (local_err != NULL) {
1787                 error_propagate(errp, local_err);
1788             } else {
1789                 error_setg(errp, "failed while preparing to reopen image '%s'",
1790                            reopen_state->bs->filename);
1791             }
1792             goto error;
1793         }
1794     } else {
1795         /* It is currently mandatory to have a bdrv_reopen_prepare()
1796          * handler for each supported drv. */
1797         error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
1798                   drv->format_name, bdrv_get_device_name(reopen_state->bs),
1799                   "reopening of file");
1800         ret = -1;
1801         goto error;
1802     }
1803 
1804     ret = 0;
1805 
1806 error:
1807     return ret;
1808 }
1809 
1810 /*
1811  * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1812  * makes them final by swapping the staging BlockDriverState contents into
1813  * the active BlockDriverState contents.
1814  */
1815 void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1816 {
1817     BlockDriver *drv;
1818 
1819     assert(reopen_state != NULL);
1820     drv = reopen_state->bs->drv;
1821     assert(drv != NULL);
1822 
1823     /* If there are any driver level actions to take */
1824     if (drv->bdrv_reopen_commit) {
1825         drv->bdrv_reopen_commit(reopen_state);
1826     }
1827 
1828     /* set BDS specific flags now */
1829     reopen_state->bs->open_flags         = reopen_state->flags;
1830     reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1831                                               BDRV_O_CACHE_WB);
1832     reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1833 
1834     bdrv_refresh_limits(reopen_state->bs, NULL);
1835 }
1836 
1837 /*
1838  * Abort the reopen, and delete and free the staged changes in
1839  * reopen_state
1840  */
1841 void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1842 {
1843     BlockDriver *drv;
1844 
1845     assert(reopen_state != NULL);
1846     drv = reopen_state->bs->drv;
1847     assert(drv != NULL);
1848 
1849     if (drv->bdrv_reopen_abort) {
1850         drv->bdrv_reopen_abort(reopen_state);
1851     }
1852 }
1853 
1854 
1855 void bdrv_close(BlockDriverState *bs)
1856 {
1857     BdrvAioNotifier *ban, *ban_next;
1858 
1859     if (bs->job) {
1860         block_job_cancel_sync(bs->job);
1861     }
1862     bdrv_drain_all(); /* complete I/O */
1863     bdrv_flush(bs);
1864     bdrv_drain_all(); /* in case flush left pending I/O */
1865     notifier_list_notify(&bs->close_notifiers, bs);
1866 
1867     if (bs->drv) {
1868         if (bs->backing_hd) {
1869             BlockDriverState *backing_hd = bs->backing_hd;
1870             bdrv_set_backing_hd(bs, NULL);
1871             bdrv_unref(backing_hd);
1872         }
1873         bs->drv->bdrv_close(bs);
1874         g_free(bs->opaque);
1875         bs->opaque = NULL;
1876         bs->drv = NULL;
1877         bs->copy_on_read = 0;
1878         bs->backing_file[0] = '\0';
1879         bs->backing_format[0] = '\0';
1880         bs->total_sectors = 0;
1881         bs->encrypted = 0;
1882         bs->valid_key = 0;
1883         bs->sg = 0;
1884         bs->growable = 0;
1885         bs->zero_beyond_eof = false;
1886         QDECREF(bs->options);
1887         bs->options = NULL;
1888         QDECREF(bs->full_open_options);
1889         bs->full_open_options = NULL;
1890 
1891         if (bs->file != NULL) {
1892             bdrv_unref(bs->file);
1893             bs->file = NULL;
1894         }
1895     }
1896 
1897     if (bs->blk) {
1898         blk_dev_change_media_cb(bs->blk, false);
1899     }
1900 
1901     /* throttling disk I/O limits */
1902     if (bs->io_limits_enabled) {
1903         bdrv_io_limits_disable(bs);
1904     }
1905 
1906     QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
1907         g_free(ban);
1908     }
1909     QLIST_INIT(&bs->aio_notifiers);
1910 }
1911 
1912 void bdrv_close_all(void)
1913 {
1914     BlockDriverState *bs;
1915 
1916     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1917         AioContext *aio_context = bdrv_get_aio_context(bs);
1918 
1919         aio_context_acquire(aio_context);
1920         bdrv_close(bs);
1921         aio_context_release(aio_context);
1922     }
1923 }
1924 
1925 /* Check if any requests are in-flight (including throttled requests) */
1926 static bool bdrv_requests_pending(BlockDriverState *bs)
1927 {
1928     if (!QLIST_EMPTY(&bs->tracked_requests)) {
1929         return true;
1930     }
1931     if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1932         return true;
1933     }
1934     if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
1935         return true;
1936     }
1937     if (bs->file && bdrv_requests_pending(bs->file)) {
1938         return true;
1939     }
1940     if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1941         return true;
1942     }
1943     return false;
1944 }
1945 
1946 static bool bdrv_drain_one(BlockDriverState *bs)
1947 {
1948     bool bs_busy;
1949 
1950     bdrv_flush_io_queue(bs);
1951     bdrv_start_throttled_reqs(bs);
1952     bs_busy = bdrv_requests_pending(bs);
1953     bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
1954     return bs_busy;
1955 }
1956 
1957 /*
1958  * Wait for pending requests to complete on a single BlockDriverState subtree
1959  *
1960  * See the warning in bdrv_drain_all().  This function can only be called if
1961  * you are sure nothing can generate I/O because you have op blockers
1962  * installed.
1963  *
1964  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
1965  * AioContext.
1966  */
1967 void bdrv_drain(BlockDriverState *bs)
1968 {
1969     while (bdrv_drain_one(bs)) {
1970         /* Keep iterating */
1971     }
1972 }
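
/*
 * Illustrative sketch (not in the original file): a caller of bdrv_drain()
 * must hold the BlockDriverState's AioContext, unlike with
 * bdrv_drain_all().
 */
static void example_drain_one(BlockDriverState *bs)
{
    AioContext *aio_context = bdrv_get_aio_context(bs);

    aio_context_acquire(aio_context);
    bdrv_drain(bs);
    aio_context_release(aio_context);
}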
1973 
1974 /*
1975  * Wait for pending requests to complete across all BlockDriverStates
1976  *
1977  * This function does not flush data to disk, use bdrv_flush_all() for that
1978  * after calling this function.
1979  *
1980  * Note that completion of an asynchronous I/O operation can trigger any
1981  * number of other I/O operations on other devices---for example a coroutine
1982  * can be arbitrarily complex and a constant flow of I/O can come until the
1983  * coroutine is complete.  Because of this, it is not possible to have a
1984  * function to drain a single device's I/O queue.
1985  */
1986 void bdrv_drain_all(void)
1987 {
1988     /* Always run first iteration so any pending completion BHs run */
1989     bool busy = true;
1990     BlockDriverState *bs;
1991 
1992     while (busy) {
1993         busy = false;
1994 
1995         QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1996             AioContext *aio_context = bdrv_get_aio_context(bs);
1997 
1998             aio_context_acquire(aio_context);
1999             busy |= bdrv_drain_one(bs);
2000             aio_context_release(aio_context);
2001         }
2002     }
2003 }
2004 
2005 /* Make a BlockDriverState anonymous by removing it from the bdrv_states
2006  * and graph_bdrv_states lists.
2007  * Also, NUL-terminate the node_name to prevent a double remove. */
2008 void bdrv_make_anon(BlockDriverState *bs)
2009 {
2010     /*
2011      * Take care to remove bs from bdrv_states only when it's actually
2012      * in it.  Note that bs->device_list.tqe_prev is initially null,
2013      * and gets set to non-null by QTAILQ_INSERT_TAIL().  Establish
2014      * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by
2015      * resetting it to null on remove.
2016      */
2017     if (bs->device_list.tqe_prev) {
2018         QTAILQ_REMOVE(&bdrv_states, bs, device_list);
2019         bs->device_list.tqe_prev = NULL;
2020     }
2021     if (bs->node_name[0] != '\0') {
2022         QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
2023     }
2024     bs->node_name[0] = '\0';
2025 }
2026 
2027 static void bdrv_rebind(BlockDriverState *bs)
2028 {
2029     if (bs->drv && bs->drv->bdrv_rebind) {
2030         bs->drv->bdrv_rebind(bs);
2031     }
2032 }
2033 
2034 static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
2035                                      BlockDriverState *bs_src)
2036 {
2037     /* move some fields that need to stay attached to the device */
2038 
2039     /* dev info */
2040     bs_dest->guest_block_size   = bs_src->guest_block_size;
2041     bs_dest->copy_on_read       = bs_src->copy_on_read;
2042 
2043     bs_dest->enable_write_cache = bs_src->enable_write_cache;
2044 
2045     /* i/o throttled req */
2046     memcpy(&bs_dest->throttle_state,
2047            &bs_src->throttle_state,
2048            sizeof(ThrottleState));
2049     bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
2050     bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
2051     bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;
2052 
2053     /* r/w error */
2054     bs_dest->on_read_error      = bs_src->on_read_error;
2055     bs_dest->on_write_error     = bs_src->on_write_error;
2056 
2057     /* i/o status */
2058     bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
2059     bs_dest->iostatus           = bs_src->iostatus;
2060 
2061     /* dirty bitmap */
2062     bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;
2063 
2064     /* reference count */
2065     bs_dest->refcnt             = bs_src->refcnt;
2066 
2067     /* job */
2068     bs_dest->job                = bs_src->job;
2069 
2070     /* keep the same entry in bdrv_states */
2071     bs_dest->device_list = bs_src->device_list;
2072     bs_dest->blk = bs_src->blk;
2073 
2074     memcpy(bs_dest->op_blockers, bs_src->op_blockers,
2075            sizeof(bs_dest->op_blockers));
2076 }
2077 
2078 /*
2079  * Swap bs contents for two image chains while they are live,
2080  * while keeping required fields on the BlockDriverState that is
2081  * actually attached to a device.
2082  *
2083  * This will modify the BlockDriverState fields, and swap contents
2084  * between bs_new and bs_old. Both bs_new and bs_old are modified.
2085  *
2086  * bs_new must not be attached to a BlockBackend.
2087  *
2088  * This function does not create any image files.
2089  */
2090 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2091 {
2092     BlockDriverState tmp;
2093 
2094     /* The code needs to swap the node_name but simply swapping node_list won't
2095      * work, so first remove the nodes from the graph list, do the swap, then
2096      * insert them back if needed.
2097      */
2098     if (bs_new->node_name[0] != '\0') {
2099         QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2100     }
2101     if (bs_old->node_name[0] != '\0') {
2102         QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2103     }
2104 
2105     /* bs_new must be unattached and shouldn't have anything fancy enabled */
2106     assert(!bs_new->blk);
2107     assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
2108     assert(bs_new->job == NULL);
2109     assert(bs_new->io_limits_enabled == false);
2110     assert(!throttle_have_timer(&bs_new->throttle_state));
2111 
2112     tmp = *bs_new;
2113     *bs_new = *bs_old;
2114     *bs_old = tmp;
2115 
2116     /* there are some fields that should not be swapped, move them back */
2117     bdrv_move_feature_fields(&tmp, bs_old);
2118     bdrv_move_feature_fields(bs_old, bs_new);
2119     bdrv_move_feature_fields(bs_new, &tmp);
2120 
2121     /* bs_new must remain unattached */
2122     assert(!bs_new->blk);
2123 
2124     /* Check a few fields that should remain attached to the device */
2125     assert(bs_new->job == NULL);
2126     assert(bs_new->io_limits_enabled == false);
2127     assert(!throttle_have_timer(&bs_new->throttle_state));
2128 
2129     /* insert the nodes back into the graph node list if needed */
2130     if (bs_new->node_name[0] != '\0') {
2131         QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2132     }
2133     if (bs_old->node_name[0] != '\0') {
2134         QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2135     }
2136 
2137     bdrv_rebind(bs_new);
2138     bdrv_rebind(bs_old);
2139 }
2140 
2141 /*
2142  * Add new bs contents at the top of an image chain while the chain is
2143  * live, while keeping required fields on the top layer.
2144  *
2145  * This will modify the BlockDriverState fields, and swap contents
2146  * between bs_new and bs_top. Both bs_new and bs_top are modified.
2147  *
2148  * bs_new must not be attached to a BlockBackend.
2149  *
2150  * This function does not create any image files.
2151  */
2152 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2153 {
2154     bdrv_swap(bs_new, bs_top);
2155 
2156     /* After the swap, bs_new holds the old top-of-chain contents; make it
2157      * the backing file of the new top, bs_top. */
2158     bdrv_set_backing_hd(bs_top, bs_new);
2159 }
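
/*
 * Illustrative sketch (assumption): a typical bdrv_append() use is taking
 * a live snapshot, where a freshly opened, unattached overlay becomes the
 * new top of the chain.
 */
static void example_take_snapshot(BlockDriverState *bs_top,
                                  BlockDriverState *bs_overlay)
{
    /* bs_overlay must not be attached to a BlockBackend */
    bdrv_append(bs_overlay, bs_top);
    /* The device keeps pointing at bs_top, whose contents are now those
     * of the overlay, with the old top image as its backing file */
}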
2160 
2161 static void bdrv_delete(BlockDriverState *bs)
2162 {
2163     assert(!bs->job);
2164     assert(bdrv_op_blocker_is_empty(bs));
2165     assert(!bs->refcnt);
2166     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2167 
2168     bdrv_close(bs);
2169 
2170     /* remove from list, if necessary */
2171     bdrv_make_anon(bs);
2172 
2173     g_free(bs);
2174 }
2175 
2176 /*
2177  * Run consistency checks on an image
2178  *
2179  * Returns 0 if the check could be completed (it doesn't mean that the image is
2180  * free of errors) or -errno when an internal error occurred. The results of the
2181  * check are stored in res.
2182  */
2183 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2184 {
2185     if (bs->drv == NULL) {
2186         return -ENOMEDIUM;
2187     }
2188     if (bs->drv->bdrv_check == NULL) {
2189         return -ENOTSUP;
2190     }
2191 
2192     memset(res, 0, sizeof(*res));
2193     return bs->drv->bdrv_check(bs, res, fix);
2194 }
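
/*
 * Illustrative sketch (not in the original file): running an image check.
 * BDRV_FIX_LEAKS and BDRV_FIX_ERRORS are assumed here to be the usual
 * BdrvCheckMode repair flags.
 */
static int example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult result;
    int ret;

    ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS | BDRV_FIX_ERRORS);
    if (ret < 0) {
        /* the check itself could not be completed */
        return ret;
    }
    /* ret == 0 only means the check ran; inspect 'result' for errors */
    return 0;
}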
2195 
2196 #define COMMIT_BUF_SECTORS 2048
2197 
2198 /* commit COW file into the raw image */
2199 int bdrv_commit(BlockDriverState *bs)
2200 {
2201     BlockDriver *drv = bs->drv;
2202     int64_t sector, total_sectors, length, backing_length;
2203     int n, ro, open_flags;
2204     int ret = 0;
2205     uint8_t *buf = NULL;
2206     char filename[PATH_MAX];
2207 
2208     if (!drv)
2209         return -ENOMEDIUM;
2210 
2211     if (!bs->backing_hd) {
2212         return -ENOTSUP;
2213     }
2214 
2215     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2216         bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2217         return -EBUSY;
2218     }
2219 
2220     ro = bs->backing_hd->read_only;
2221     /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2222     pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2223     open_flags = bs->backing_hd->open_flags;
2224 
2225     if (ro) {
2226         if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2227             return -EACCES;
2228         }
2229     }
2230 
2231     length = bdrv_getlength(bs);
2232     if (length < 0) {
2233         ret = length;
2234         goto ro_cleanup;
2235     }
2236 
2237     backing_length = bdrv_getlength(bs->backing_hd);
2238     if (backing_length < 0) {
2239         ret = backing_length;
2240         goto ro_cleanup;
2241     }
2242 
2243     /* If our top snapshot is larger than the backing file image,
2244      * grow the backing file image if possible.  If not possible,
2245      * we must return an error */
2246     if (length > backing_length) {
2247         ret = bdrv_truncate(bs->backing_hd, length);
2248         if (ret < 0) {
2249             goto ro_cleanup;
2250         }
2251     }
2252 
2253     total_sectors = length >> BDRV_SECTOR_BITS;
2254 
2255     /* qemu_try_blockalign() for bs will choose an alignment that works for
2256      * bs->backing_hd as well, so no need to compare the alignment manually. */
2257     buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2258     if (buf == NULL) {
2259         ret = -ENOMEM;
2260         goto ro_cleanup;
2261     }
2262 
2263     for (sector = 0; sector < total_sectors; sector += n) {
2264         ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2265         if (ret < 0) {
2266             goto ro_cleanup;
2267         }
2268         if (ret) {
2269             ret = bdrv_read(bs, sector, buf, n);
2270             if (ret < 0) {
2271                 goto ro_cleanup;
2272             }
2273 
2274             ret = bdrv_write(bs->backing_hd, sector, buf, n);
2275             if (ret < 0) {
2276                 goto ro_cleanup;
2277             }
2278         }
2279     }
2280 
2281     if (drv->bdrv_make_empty) {
2282         ret = drv->bdrv_make_empty(bs);
2283         if (ret < 0) {
2284             goto ro_cleanup;
2285         }
2286         bdrv_flush(bs);
2287     }
2288 
2289     /*
2290      * Make sure all data we wrote to the backing device is actually
2291      * stable on disk.
2292      */
2293     if (bs->backing_hd) {
2294         bdrv_flush(bs->backing_hd);
2295     }
2296 
2297     ret = 0;
2298 ro_cleanup:
2299     qemu_vfree(buf);
2300 
2301     if (ro) {
2302         /* ignoring error return here */
2303         bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2304     }
2305 
2306     return ret;
2307 }
2308 
2309 int bdrv_commit_all(void)
2310 {
2311     BlockDriverState *bs;
2312 
2313     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2314         AioContext *aio_context = bdrv_get_aio_context(bs);
2315 
2316         aio_context_acquire(aio_context);
2317         if (bs->drv && bs->backing_hd) {
2318             int ret = bdrv_commit(bs);
2319             if (ret < 0) {
2320                 aio_context_release(aio_context);
2321                 return ret;
2322             }
2323         }
2324         aio_context_release(aio_context);
2325     }
2326     return 0;
2327 }
2328 
2329 /**
2330  * Remove an active request from the tracked requests list
2331  *
2332  * This function should be called when a tracked request is completing.
2333  */
2334 static void tracked_request_end(BdrvTrackedRequest *req)
2335 {
2336     if (req->serialising) {
2337         req->bs->serialising_in_flight--;
2338     }
2339 
2340     QLIST_REMOVE(req, list);
2341     qemu_co_queue_restart_all(&req->wait_queue);
2342 }
2343 
2344 /**
2345  * Add an active request to the tracked requests list
2346  */
2347 static void tracked_request_begin(BdrvTrackedRequest *req,
2348                                   BlockDriverState *bs,
2349                                   int64_t offset,
2350                                   unsigned int bytes, bool is_write)
2351 {
2352     *req = (BdrvTrackedRequest){
2353         .bs = bs,
2354         .offset         = offset,
2355         .bytes          = bytes,
2356         .is_write       = is_write,
2357         .co             = qemu_coroutine_self(),
2358         .serialising    = false,
2359         .overlap_offset = offset,
2360         .overlap_bytes  = bytes,
2361     };
2362 
2363     qemu_co_queue_init(&req->wait_queue);
2364 
2365     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2366 }
2367 
2368 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2369 {
2370     int64_t overlap_offset = req->offset & ~(align - 1);
2371     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2372                                - overlap_offset;
2373 
2374     if (!req->serialising) {
2375         req->bs->serialising_in_flight++;
2376         req->serialising = true;
2377     }
2378 
2379     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2380     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2381 }
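
/*
 * Worked example (illustrative): with align = 4096, a request at
 * offset = 5000 with bytes = 100 gets
 *     overlap_offset = 5000 & ~4095                 = 4096
 *     overlap_bytes  = ROUND_UP(5100, 4096) - 4096  = 4096
 * i.e. the serialised region is widened to cover whole 4096-byte blocks.
 */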
2382 
2383 /**
2384  * Round a region to cluster boundaries
2385  */
2386 void bdrv_round_to_clusters(BlockDriverState *bs,
2387                             int64_t sector_num, int nb_sectors,
2388                             int64_t *cluster_sector_num,
2389                             int *cluster_nb_sectors)
2390 {
2391     BlockDriverInfo bdi;
2392 
2393     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2394         *cluster_sector_num = sector_num;
2395         *cluster_nb_sectors = nb_sectors;
2396     } else {
2397         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2398         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2399         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2400                                             nb_sectors, c);
2401     }
2402 }
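
/*
 * Worked example (illustrative): with a 64 KiB cluster size,
 * c = 65536 / 512 = 128 sectors, so sector_num = 130, nb_sectors = 10
 * rounds to
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)          = 128
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128
 * and the region [130, 140) grows to the containing cluster [128, 256).
 */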
2403 
2404 static int bdrv_get_cluster_size(BlockDriverState *bs)
2405 {
2406     BlockDriverInfo bdi;
2407     int ret;
2408 
2409     ret = bdrv_get_info(bs, &bdi);
2410     if (ret < 0 || bdi.cluster_size == 0) {
2411         return bs->request_alignment;
2412     } else {
2413         return bdi.cluster_size;
2414     }
2415 }
2416 
2417 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2418                                      int64_t offset, unsigned int bytes)
2419 {
2420     /*        aaaa   bbbb */
2421     if (offset >= req->overlap_offset + req->overlap_bytes) {
2422         return false;
2423     }
2424     /* bbbb   aaaa        */
2425     if (req->overlap_offset >= offset + bytes) {
2426         return false;
2427     }
2428     return true;
2429 }
2430 
2431 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2432 {
2433     BlockDriverState *bs = self->bs;
2434     BdrvTrackedRequest *req;
2435     bool retry;
2436     bool waited = false;
2437 
2438     if (!bs->serialising_in_flight) {
2439         return false;
2440     }
2441 
2442     do {
2443         retry = false;
2444         QLIST_FOREACH(req, &bs->tracked_requests, list) {
2445             if (req == self || (!req->serialising && !self->serialising)) {
2446                 continue;
2447             }
2448             if (tracked_request_overlaps(req, self->overlap_offset,
2449                                          self->overlap_bytes))
2450             {
2451                 /* Hitting this means there was a reentrant request, for
2452                  * example, a block driver issuing nested requests.  This must
2453                  * never happen since it means deadlock.
2454                  */
2455                 assert(qemu_coroutine_self() != req->co);
2456 
2457                 /* If the request is already (indirectly) waiting for us, or
2458                  * will wait for us as soon as it wakes up, then just go on
2459                  * (instead of producing a deadlock in the former case). */
2460                 if (!req->waiting_for) {
2461                     self->waiting_for = req;
2462                     qemu_co_queue_wait(&req->wait_queue);
2463                     self->waiting_for = NULL;
2464                     retry = true;
2465                     waited = true;
2466                     break;
2467                 }
2468             }
2469         }
2470     } while (retry);
2471 
2472     return waited;
2473 }
2474 
2475 /*
2476  * Return values:
2477  * 0        - success
2478  * -EINVAL  - backing format specified, but no file
2479  * -ENOSPC  - can't update the backing file because no space is left in the
2480  *            image file header
2481  * -ENOTSUP - format driver doesn't support changing the backing file
2482  */
2483 int bdrv_change_backing_file(BlockDriverState *bs,
2484     const char *backing_file, const char *backing_fmt)
2485 {
2486     BlockDriver *drv = bs->drv;
2487     int ret;
2488 
2489     /* Backing file format doesn't make sense without a backing file */
2490     if (backing_fmt && !backing_file) {
2491         return -EINVAL;
2492     }
2493 
2494     if (drv->bdrv_change_backing_file != NULL) {
2495         ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2496     } else {
2497         ret = -ENOTSUP;
2498     }
2499 
2500     if (ret == 0) {
2501         pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2502         pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2503     }
2504     return ret;
2505 }
2506 
2507 /*
2508  * Finds the image layer in the chain that has 'bs' as its backing file.
2509  *
2510  * active is the current topmost image.
2511  *
2512  * Returns NULL if bs is not found in active's image chain,
2513  * or if active == bs.
2514  *
2515  * Returns the bottommost base image if bs == NULL.
2516  */
2517 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2518                                     BlockDriverState *bs)
2519 {
2520     while (active && bs != active->backing_hd) {
2521         active = active->backing_hd;
2522     }
2523 
2524     return active;
2525 }
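
/*
 * Worked example (illustrative): given the chain
 *     base <- mid <- active
 * bdrv_find_overlay(active, base) returns mid (the BDS whose backing_hd
 * is base), bdrv_find_overlay(active, active) returns NULL, and
 * bdrv_find_overlay(active, NULL) returns base itself.
 */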
2526 
2527 /* Given a BDS, searches for the base layer. */
2528 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
2529 {
2530     return bdrv_find_overlay(bs, NULL);
2531 }
2532 
2533 typedef struct BlkIntermediateStates {
2534     BlockDriverState *bs;
2535     QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2536 } BlkIntermediateStates;
2537 
2538 
2539 /*
2540  * Drops images above 'base' up to and including 'top', and sets the image
2541  * above 'top' to have base as its backing file.
2542  *
2543  * Requires that the overlay to 'top' is opened r/w, so that the backing file
2544  * information in 'bs' can be properly updated.
2545  *
2546  * E.g., this will convert the following chain:
2547  * bottom <- base <- intermediate <- top <- active
2548  *
2549  * to
2550  *
2551  * bottom <- base <- active
2552  *
2553  * It is allowed for bottom==base, in which case it converts:
2554  *
2555  * base <- intermediate <- top <- active
2556  *
2557  * to
2558  *
2559  * base <- active
2560  *
2561  * If backing_file_str is non-NULL, it will be used when modifying top's
2562  * overlay image metadata.
2563  *
2564  * Error conditions:
2565  *  if active == top, that is considered an error
2566  *
2567  */
2568 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2569                            BlockDriverState *base, const char *backing_file_str)
2570 {
2571     BlockDriverState *intermediate;
2572     BlockDriverState *base_bs = NULL;
2573     BlockDriverState *new_top_bs = NULL;
2574     BlkIntermediateStates *intermediate_state, *next;
2575     int ret = -EIO;
2576 
2577     QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2578     QSIMPLEQ_INIT(&states_to_delete);
2579 
2580     if (!top->drv || !base->drv) {
2581         goto exit;
2582     }
2583 
2584     new_top_bs = bdrv_find_overlay(active, top);
2585 
2586     if (new_top_bs == NULL) {
2587         /* we could not find the image above 'top', this is an error */
2588         goto exit;
2589     }
2590 
2591     /* special case of new_top_bs->backing_hd already pointing to base - nothing
2592      * to do, no intermediate images */
2593     if (new_top_bs->backing_hd == base) {
2594         ret = 0;
2595         goto exit;
2596     }
2597 
2598     intermediate = top;
2599 
2600     /* now we will go down through the list, and add each BDS we find
2601      * into our deletion queue, until we hit the 'base'
2602      */
2603     while (intermediate) {
2604         intermediate_state = g_new0(BlkIntermediateStates, 1);
2605         intermediate_state->bs = intermediate;
2606         QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2607 
2608         if (intermediate->backing_hd == base) {
2609             base_bs = intermediate->backing_hd;
2610             break;
2611         }
2612         intermediate = intermediate->backing_hd;
2613     }
2614     if (base_bs == NULL) {
2615         /* Something went wrong: we did not end at the base. Safely
2616          * unravel everything, and exit with error */
2617         goto exit;
2618     }
2619 
2620     /* success - we can delete the intermediate states, and link top->base */
2621     backing_file_str = backing_file_str ? backing_file_str : base_bs->filename;
2622     ret = bdrv_change_backing_file(new_top_bs, backing_file_str,
2623                                    base_bs->drv ? base_bs->drv->format_name : "");
2624     if (ret) {
2625         goto exit;
2626     }
2627     bdrv_set_backing_hd(new_top_bs, base_bs);
2628 
2629     QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2630         /* so that bdrv_close() does not recursively close the chain */
2631         bdrv_set_backing_hd(intermediate_state->bs, NULL);
2632         bdrv_unref(intermediate_state->bs);
2633     }
2634     ret = 0;
2635 
2636 exit:
2637     QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2638         g_free(intermediate_state);
2639     }
2640     return ret;
2641 }
2642 
2643 
2644 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2645                                    size_t size)
2646 {
2647     int64_t len;
2648 
2649     if (size > INT_MAX) {
2650         return -EIO;
2651     }
2652 
2653     if (!bdrv_is_inserted(bs))
2654         return -ENOMEDIUM;
2655 
2656     if (bs->growable)
2657         return 0;
2658 
2659     len = bdrv_getlength(bs);
2660 
2661     if (offset < 0)
2662         return -EIO;
2663 
2664     if ((offset > len) || (len - offset < size))
2665         return -EIO;
2666 
2667     return 0;
2668 }
2669 
2670 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2671                               int nb_sectors)
2672 {
2673     if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2674         return -EIO;
2675     }
2676 
2677     return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2678                                    nb_sectors * BDRV_SECTOR_SIZE);
2679 }
2680 
2681 typedef struct RwCo {
2682     BlockDriverState *bs;
2683     int64_t offset;
2684     QEMUIOVector *qiov;
2685     bool is_write;
2686     int ret;
2687     BdrvRequestFlags flags;
2688 } RwCo;
2689 
2690 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2691 {
2692     RwCo *rwco = opaque;
2693 
2694     if (!rwco->is_write) {
2695         rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2696                                       rwco->qiov->size, rwco->qiov,
2697                                       rwco->flags);
2698     } else {
2699         rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2700                                        rwco->qiov->size, rwco->qiov,
2701                                        rwco->flags);
2702     }
2703 }
2704 
2705 /*
2706  * Process a vectored synchronous request using coroutines
2707  */
2708 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2709                         QEMUIOVector *qiov, bool is_write,
2710                         BdrvRequestFlags flags)
2711 {
2712     Coroutine *co;
2713     RwCo rwco = {
2714         .bs = bs,
2715         .offset = offset,
2716         .qiov = qiov,
2717         .is_write = is_write,
2718         .ret = NOT_DONE,
2719         .flags = flags,
2720     };
2721 
2722     /**
2723      * In a synchronous call context, when the vcpu is blocked, the throttling
2724      * timer will not fire, so I/O throttling has to be disabled here if it
2725      * has been enabled.
2726      */
2727     if (bs->io_limits_enabled) {
2728         fprintf(stderr, "Disabling I/O throttling on '%s' due "
2729                         "to synchronous I/O.\n", bdrv_get_device_name(bs));
2730         bdrv_io_limits_disable(bs);
2731     }
2732 
2733     if (qemu_in_coroutine()) {
2734         /* Fast-path if already in coroutine context */
2735         bdrv_rw_co_entry(&rwco);
2736     } else {
2737         AioContext *aio_context = bdrv_get_aio_context(bs);
2738 
2739         co = qemu_coroutine_create(bdrv_rw_co_entry);
2740         qemu_coroutine_enter(co, &rwco);
2741         while (rwco.ret == NOT_DONE) {
2742             aio_poll(aio_context, true);
2743         }
2744     }
2745     return rwco.ret;
2746 }
2747 
2748 /*
2749  * Process a synchronous request using coroutines
2750  */
2751 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2752                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
2753 {
2754     QEMUIOVector qiov;
2755     struct iovec iov = {
2756         .iov_base = (void *)buf,
2757         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2758     };
2759 
2760     if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2761         return -EINVAL;
2762     }
2763 
2764     qemu_iovec_init_external(&qiov, &iov, 1);
2765     return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2766                         &qiov, is_write, flags);
2767 }
2768 
2769 /* return < 0 if error. See bdrv_write() for the return codes */
2770 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2771               uint8_t *buf, int nb_sectors)
2772 {
2773     return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2774 }
2775 
2776 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2777 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2778                           uint8_t *buf, int nb_sectors)
2779 {
2780     bool enabled;
2781     int ret;
2782 
2783     enabled = bs->io_limits_enabled;
2784     bs->io_limits_enabled = false;
2785     ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2786     bs->io_limits_enabled = enabled;
2787     return ret;
2788 }
2789 
2790 /* Return < 0 if error. Important errors are:
2791   -EIO         generic I/O error (may happen for all errors)
2792   -ENOMEDIUM   No media inserted.
2793   -EINVAL      Invalid sector number or nb_sectors
2794   -EACCES      Trying to write a read-only device
2795 */
2796 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2797                const uint8_t *buf, int nb_sectors)
2798 {
2799     return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2800 }
2801 
2802 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2803                       int nb_sectors, BdrvRequestFlags flags)
2804 {
2805     return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2806                       BDRV_REQ_ZERO_WRITE | flags);
2807 }
2808 
2809 /*
2810  * Completely zero out a block device with the help of bdrv_write_zeroes.
2811  * The operation is sped up by checking the block status and only writing
2812  * zeroes to the device if they currently do not return zeroes. Optional
2813  * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2814  *
2815  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2816  */
2817 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2818 {
2819     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
2820     int n;
2821 
2822     target_sectors = bdrv_nb_sectors(bs);
2823     if (target_sectors < 0) {
2824         return target_sectors;
2825     }
2826 
2827     for (;;) {
2828         nb_sectors = target_sectors - sector_num;
2829         if (nb_sectors <= 0) {
2830             return 0;
2831         }
2832         if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2833             nb_sectors = INT_MAX / BDRV_SECTOR_SIZE;
2834         }
2835         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2836         if (ret < 0) {
2837             error_report("error getting block status at sector %" PRId64 ": %s",
2838                          sector_num, strerror(-ret));
2839             return ret;
2840         }
2841         if (ret & BDRV_BLOCK_ZERO) {
2842             sector_num += n;
2843             continue;
2844         }
2845         ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2846         if (ret < 0) {
2847             error_report("error writing zeroes at sector %" PRId64 ": %s",
2848                          sector_num, strerror(-ret));
2849             return ret;
2850         }
2851         sector_num += n;
2852     }
2853 }
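
/*
 * Illustrative sketch (not in the original file): blanking a device while
 * letting the driver unmap/discard blocks where it can.
 */
static int example_blank_device(BlockDriverState *bs)
{
    /* Regions that already read as zeroes are skipped */
    return bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
}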
2854 
2855 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2856 {
2857     QEMUIOVector qiov;
2858     struct iovec iov = {
2859         .iov_base = (void *)buf,
2860         .iov_len = bytes,
2861     };
2862     int ret;
2863 
2864     if (bytes < 0) {
2865         return -EINVAL;
2866     }
2867 
2868     qemu_iovec_init_external(&qiov, &iov, 1);
2869     ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2870     if (ret < 0) {
2871         return ret;
2872     }
2873 
2874     return bytes;
2875 }
2876 
2877 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2878 {
2879     int ret;
2880 
2881     ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2882     if (ret < 0) {
2883         return ret;
2884     }
2885 
2886     return qiov->size;
2887 }
2888 
2889 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2890                 const void *buf, int bytes)
2891 {
2892     QEMUIOVector qiov;
2893     struct iovec iov = {
2894         .iov_base   = (void *) buf,
2895         .iov_len    = bytes,
2896     };
2897 
2898     if (bytes < 0) {
2899         return -EINVAL;
2900     }
2901 
2902     qemu_iovec_init_external(&qiov, &iov, 1);
2903     return bdrv_pwritev(bs, offset, &qiov);
2904 }
2905 
2906 /*
2907  * Writes to the file and ensures that no writes are reordered across this
2908  * request (acts as a barrier)
2909  *
2910  * Returns 0 on success, -errno in error cases.
2911  */
2912 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2913     const void *buf, int count)
2914 {
2915     int ret;
2916 
2917     ret = bdrv_pwrite(bs, offset, buf, count);
2918     if (ret < 0) {
2919         return ret;
2920     }
2921 
2922     /* No flush needed for cache modes that already do it */
2923     if (bs->enable_write_cache) {
2924         bdrv_flush(bs);
2925     }
2926 
2927     return 0;
2928 }
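
/*
 * Illustrative sketch (assumption): persisting an on-disk header with
 * bdrv_pwrite_sync() so later writes cannot be reordered ahead of it.
 * The header layout and its offset 0 are hypothetical.
 */
static int example_update_header(BlockDriverState *bs,
                                 const void *header, int header_size)
{
    /* Returns only after the data is stable (flushing unless the cache
     * mode already guarantees it) */
    return bdrv_pwrite_sync(bs, 0, header, header_size);
}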
2929 
2930 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2931         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2932 {
2933     /* Perform I/O through a temporary buffer so that users who scribble over
2934      * their read buffer while the operation is in progress do not end up
2935      * modifying the image file.  This is critical for zero-copy guest I/O
2936      * where anything might happen inside guest memory.
2937      */
2938     void *bounce_buffer;
2939 
2940     BlockDriver *drv = bs->drv;
2941     struct iovec iov;
2942     QEMUIOVector bounce_qiov;
2943     int64_t cluster_sector_num;
2944     int cluster_nb_sectors;
2945     size_t skip_bytes;
2946     int ret;
2947 
2948     /* Cover the entire cluster so no additional backing file I/O is required
2949      * when allocating the cluster in the image file.
2950      */
2951     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2952                            &cluster_sector_num, &cluster_nb_sectors);
2953 
2954     trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2955                                    cluster_sector_num, cluster_nb_sectors);
2956 
2957     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2958     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
2959     if (bounce_buffer == NULL) {
2960         ret = -ENOMEM;
2961         goto err;
2962     }
2963 
2964     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2965 
2966     ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2967                              &bounce_qiov);
2968     if (ret < 0) {
2969         goto err;
2970     }
2971 
2972     if (drv->bdrv_co_write_zeroes &&
2973         buffer_is_zero(bounce_buffer, iov.iov_len)) {
2974         ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
2975                                       cluster_nb_sectors, 0);
2976     } else {
2977         /* This does not change the data on the disk, so it is not necessary
2978          * to flush even in cache=writethrough mode.
2979          */
2980         ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
2981                                   &bounce_qiov);
2982     }
2983 
2984     if (ret < 0) {
2985         /* It might be okay to ignore write errors for guest requests.  If this
2986          * is a deliberate copy-on-read then we don't want to ignore the error.
2987          * Simply report it in all cases.
2988          */
2989         goto err;
2990     }
2991 
2992     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
2993     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
2994                         nb_sectors * BDRV_SECTOR_SIZE);
2995 
2996 err:
2997     qemu_vfree(bounce_buffer);
2998     return ret;
2999 }
3000 
3001 /*
3002  * Forwards an already correctly aligned request to the BlockDriver. This
3003  * handles copy on read and zeroing after EOF; any other features must be
3004  * implemented by the caller.
3005  */
3006 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3007     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3008     int64_t align, QEMUIOVector *qiov, int flags)
3009 {
3010     BlockDriver *drv = bs->drv;
3011     int ret;
3012 
3013     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3014     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3015 
3016     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3017     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3018     assert(!qiov || bytes == qiov->size);
3019 
3020     /* Handle Copy on Read and associated serialisation */
3021     if (flags & BDRV_REQ_COPY_ON_READ) {
3022         /* If we touch the same cluster it counts as an overlap.  This
3023          * guarantees that allocating writes will be serialized and not race
3024          * with each other for the same cluster.  For example, in copy-on-read
3025          * it ensures that the CoR read and write operations are atomic and
3026          * guest writes cannot interleave between them. */
3027         mark_request_serialising(req, bdrv_get_cluster_size(bs));
3028     }
3029 
3030     wait_serialising_requests(req);
3031 
3032     if (flags & BDRV_REQ_COPY_ON_READ) {
3033         int pnum;
3034 
3035         ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3036         if (ret < 0) {
3037             goto out;
3038         }
3039 
3040         if (!ret || pnum != nb_sectors) {
3041             ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3042             goto out;
3043         }
3044     }
3045 
3046     /* Forward the request to the BlockDriver */
3047     if (!(bs->zero_beyond_eof && bs->growable)) {
3048         ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3049     } else {
3050         /* Read zeroes after EOF of growable BDSes */
3051         int64_t total_sectors, max_nb_sectors;
3052 
3053         total_sectors = bdrv_nb_sectors(bs);
3054         if (total_sectors < 0) {
3055             ret = total_sectors;
3056             goto out;
3057         }
3058 
3059         max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3060                                   align >> BDRV_SECTOR_BITS);
3061         if (nb_sectors < max_nb_sectors) {
3062             ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3063         } else if (max_nb_sectors > 0) {
3064             QEMUIOVector local_qiov;
3065 
3066             qemu_iovec_init(&local_qiov, qiov->niov);
3067             qemu_iovec_concat(&local_qiov, qiov, 0,
3068                               max_nb_sectors * BDRV_SECTOR_SIZE);
3069 
3070             ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
3071                                      &local_qiov);
3072 
3073             qemu_iovec_destroy(&local_qiov);
3074         } else {
3075             ret = 0;
3076         }
3077 
3078         /* Reading beyond end of file is supposed to produce zeroes */
3079         if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3080             uint64_t offset = MAX(0, total_sectors - sector_num);
3081             uint64_t bytes = (sector_num + nb_sectors - offset) *
3082                               BDRV_SECTOR_SIZE;
3083             qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3084         }
3085     }
3086 
3087 out:
3088     return ret;
3089 }
3090 
3091 /*
3092  * Handle a read request in coroutine context
3093  */
3094 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3095     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3096     BdrvRequestFlags flags)
3097 {
3098     BlockDriver *drv = bs->drv;
3099     BdrvTrackedRequest req;
3100 
3101     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3102     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3103     uint8_t *head_buf = NULL;
3104     uint8_t *tail_buf = NULL;
3105     QEMUIOVector local_qiov;
3106     bool use_local_qiov = false;
3107     int ret;
3108 
3109     if (!drv) {
3110         return -ENOMEDIUM;
3111     }
3112     if (bdrv_check_byte_request(bs, offset, bytes)) {
3113         return -EIO;
3114     }
3115 
3116     if (bs->copy_on_read) {
3117         flags |= BDRV_REQ_COPY_ON_READ;
3118     }
3119 
3120     /* throttling disk I/O */
3121     if (bs->io_limits_enabled) {
3122         bdrv_io_limits_intercept(bs, bytes, false);
3123     }
3124 
3125     /* Align read if necessary by padding qiov */
3126     if (offset & (align - 1)) {
3127         head_buf = qemu_blockalign(bs, align);
3128         qemu_iovec_init(&local_qiov, qiov->niov + 2);
3129         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3130         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3131         use_local_qiov = true;
3132 
3133         bytes += offset & (align - 1);
3134         offset = offset & ~(align - 1);
3135     }
3136 
3137     if ((offset + bytes) & (align - 1)) {
3138         if (!use_local_qiov) {
3139             qemu_iovec_init(&local_qiov, qiov->niov + 1);
3140             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3141             use_local_qiov = true;
3142         }
3143         tail_buf = qemu_blockalign(bs, align);
3144         qemu_iovec_add(&local_qiov, tail_buf,
3145                        align - ((offset + bytes) & (align - 1)));
3146 
3147         bytes = ROUND_UP(bytes, align);
3148     }
3149 
3150     tracked_request_begin(&req, bs, offset, bytes, false);
3151     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3152                               use_local_qiov ? &local_qiov : qiov,
3153                               flags);
3154     tracked_request_end(&req);
3155 
3156     if (use_local_qiov) {
3157         qemu_iovec_destroy(&local_qiov);
3158         qemu_vfree(head_buf);
3159         qemu_vfree(tail_buf);
3160     }
3161 
3162     return ret;
3163 }
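
/*
 * Worked example (illustrative): with align = 512, an unaligned read at
 * offset = 1000 for bytes = 100 is padded as follows:
 *     head: offset & (align - 1) = 488 bytes, offset drops to 512
 *     bytes grows to 588; (offset + bytes) & (align - 1) = 76, so a tail
 *     of 512 - 76 = 436 bytes is added and bytes rounds up to 1024
 * The driver sees one aligned request covering [512, 1536); the guest
 * buffer only receives its original [1000, 1100) slice via local_qiov.
 */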
3164 
3165 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3166     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3167     BdrvRequestFlags flags)
3168 {
3169     if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3170         return -EINVAL;
3171     }
3172 
3173     return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3174                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3175 }
3176 
3177 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3178     int nb_sectors, QEMUIOVector *qiov)
3179 {
3180     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3181 
3182     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3183 }
3184 
3185 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3186     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3187 {
3188     trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3189 
3190     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3191                             BDRV_REQ_COPY_ON_READ);
3192 }
3193 
3194 /* If no limit is specified in the BlockLimits, use a default
3195  * of 32768 512-byte sectors (16 MiB) per request.
3196  */
3197 #define MAX_WRITE_ZEROES_DEFAULT 32768
3198 
3199 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3200     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3201 {
3202     BlockDriver *drv = bs->drv;
3203     QEMUIOVector qiov;
3204     struct iovec iov = {0};
3205     int ret = 0;
3206 
3207     int max_write_zeroes = bs->bl.max_write_zeroes ?
3208                            bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3209 
3210     while (nb_sectors > 0 && !ret) {
3211         int num = nb_sectors;
3212 
3213         /* Align request.  Block drivers can expect the "bulk" of the request
3214          * to be aligned.
3215          */
3216         if (bs->bl.write_zeroes_alignment
3217             && num > bs->bl.write_zeroes_alignment) {
3218             if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3219                 /* Make a small request up to the first aligned sector.  */
3220                 num = bs->bl.write_zeroes_alignment;
3221                 num -= sector_num % bs->bl.write_zeroes_alignment;
3222             } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3223                 /* Shorten the request to the last aligned sector.  num cannot
3224                  * underflow because num > bs->bl.write_zeroes_alignment.
3225                  */
3226                 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3227             }
3228         }
3229 
3230         /* limit request size */
3231         if (num > max_write_zeroes) {
3232             num = max_write_zeroes;
3233         }
3234 
3235         ret = -ENOTSUP;
3236         /* First try the efficient write zeroes operation */
3237         if (drv->bdrv_co_write_zeroes) {
3238             ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3239         }
3240 
3241         if (ret == -ENOTSUP) {
3242             /* Fall back to bounce buffer if write zeroes is unsupported */
3243             iov.iov_len = num * BDRV_SECTOR_SIZE;
3244             if (iov.iov_base == NULL) {
3245                 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
3246                 if (iov.iov_base == NULL) {
3247                     ret = -ENOMEM;
3248                     goto fail;
3249                 }
3250                 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3251             }
3252             qemu_iovec_init_external(&qiov, &iov, 1);
3253 
3254             ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3255 
3256             /* Keep the bounce buffer around if it is big enough for
3257              * all future requests.
3258              */
3259             if (num < max_write_zeroes) {
3260                 qemu_vfree(iov.iov_base);
3261                 iov.iov_base = NULL;
3262             }
3263         }
3264 
3265         sector_num += num;
3266         nb_sectors -= num;
3267     }
3268 
3269 fail:
3270     qemu_vfree(iov.iov_base);
3271     return ret;
3272 }
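
/*
 * Worked example (illustrative): with write_zeroes_alignment = 8, a
 * request for sector_num = 5, nb_sectors = 100 is issued as
 *     sectors 5..7    (num = 3,  up to the first aligned sector)
 *     sectors 8..103  (num = 96, shortened to end on an aligned sector)
 *     sector  104     (num = 1,  the remaining tail)
 * so the driver sees an aligned "bulk" request in the middle.
 */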
3273 
3274 /*
3275  * Forwards an already correctly aligned write request to the BlockDriver.
3276  */
3277 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3278     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3279     QEMUIOVector *qiov, int flags)
3280 {
3281     BlockDriver *drv = bs->drv;
3282     bool waited;
3283     int ret;
3284 
3285     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3286     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3287 
3288     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3289     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3290     assert(!qiov || bytes == qiov->size);
3291 
3292     waited = wait_serialising_requests(req);
3293     assert(!waited || !req->serialising);
3294     assert(req->overlap_offset <= offset);
3295     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3296 
3297     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3298 
3299     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3300         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3301         qemu_iovec_is_zero(qiov)) {
3302         flags |= BDRV_REQ_ZERO_WRITE;
3303         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3304             flags |= BDRV_REQ_MAY_UNMAP;
3305         }
3306     }
3307 
3308     if (ret < 0) {
3309         /* Do nothing, write notifier decided to fail this request */
3310     } else if (flags & BDRV_REQ_ZERO_WRITE) {
3311         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3312         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3313     } else {
3314         BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3315         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3316     }
3317     BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3318 
3319     if (ret == 0 && !bs->enable_write_cache) {
3320         ret = bdrv_co_flush(bs);
3321     }
3322 
3323     bdrv_set_dirty(bs, sector_num, nb_sectors);
3324 
3325     block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
3326 
3327     if (bs->growable && ret >= 0) {
3328         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3329     }
3330 
3331     return ret;
3332 }
3333 
3334 /*
3335  * Handle a write request in coroutine context
3336  */
3337 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3338     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3339     BdrvRequestFlags flags)
3340 {
3341     BdrvTrackedRequest req;
3342     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3343     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3344     uint8_t *head_buf = NULL;
3345     uint8_t *tail_buf = NULL;
3346     QEMUIOVector local_qiov;
3347     bool use_local_qiov = false;
3348     int ret;
3349 
3350     if (!bs->drv) {
3351         return -ENOMEDIUM;
3352     }
3353     if (bs->read_only) {
3354         return -EACCES;
3355     }
3356     if (bdrv_check_byte_request(bs, offset, bytes)) {
3357         return -EIO;
3358     }
3359 
3360     /* throttling disk I/O */
3361     if (bs->io_limits_enabled) {
3362         bdrv_io_limits_intercept(bs, bytes, true);
3363     }
3364 
3365     /*
3366      * Align write if necessary by performing a read-modify-write cycle.
3367      * Pad qiov with the read parts and be sure to have a tracked request not
3368      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3369      */
3370     tracked_request_begin(&req, bs, offset, bytes, true);
3371 
3372     if (offset & (align - 1)) {
3373         QEMUIOVector head_qiov;
3374         struct iovec head_iov;
3375 
3376         mark_request_serialising(&req, align);
3377         wait_serialising_requests(&req);
3378 
3379         head_buf = qemu_blockalign(bs, align);
3380         head_iov = (struct iovec) {
3381             .iov_base   = head_buf,
3382             .iov_len    = align,
3383         };
3384         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3385 
3386         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3387         ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3388                                   align, &head_qiov, 0);
3389         if (ret < 0) {
3390             goto fail;
3391         }
3392         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3393 
3394         qemu_iovec_init(&local_qiov, qiov->niov + 2);
3395         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3396         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3397         use_local_qiov = true;
3398 
3399         bytes += offset & (align - 1);
3400         offset = offset & ~(align - 1);
3401     }
3402 
3403     if ((offset + bytes) & (align - 1)) {
3404         QEMUIOVector tail_qiov;
3405         struct iovec tail_iov;
3406         size_t tail_bytes;
3407         bool waited;
3408 
3409         mark_request_serialising(&req, align);
3410         waited = wait_serialising_requests(&req);
3411         assert(!waited || !use_local_qiov);
3412 
3413         tail_buf = qemu_blockalign(bs, align);
3414         tail_iov = (struct iovec) {
3415             .iov_base   = tail_buf,
3416             .iov_len    = align,
3417         };
3418         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3419 
3420         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3421         ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3422                                   align, &tail_qiov, 0);
3423         if (ret < 0) {
3424             goto fail;
3425         }
3426         BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3427 
3428         if (!use_local_qiov) {
3429             qemu_iovec_init(&local_qiov, qiov->niov + 1);
3430             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3431             use_local_qiov = true;
3432         }
3433 
3434         tail_bytes = (offset + bytes) & (align - 1);
3435         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3436 
3437         bytes = ROUND_UP(bytes, align);
3438     }
3439 
3440     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3441                                use_local_qiov ? &local_qiov : qiov,
3442                                flags);
3443 
3444 fail:
3445     tracked_request_end(&req);
3446 
3447     if (use_local_qiov) {
3448         qemu_iovec_destroy(&local_qiov);
3449     }
3450     qemu_vfree(head_buf);
3451     qemu_vfree(tail_buf);
3452 
3453     return ret;
3454 }
3455 
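/* Illustrative sketch (not part of the block layer API): the alignment
 * arithmetic that the RMW path above performs on a request, shown in
 * isolation.  With align = 4096, offset = 5000 and bytes = 1000, the
 * widened request becomes offset = 4096, bytes = 4096.  The helper name
 * is hypothetical.
 */
static inline void example_align_request(uint64_t align, int64_t *offset,
                                         unsigned int *bytes)
{
    unsigned int head = *offset & (align - 1);  /* unaligned head bytes */

    *offset -= head;                    /* align the start downwards... */
    *bytes += head;                     /* ...and widen the request */
    *bytes = ROUND_UP(*bytes, align);   /* round the tail up as well */
}
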
3456 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3457     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3458     BdrvRequestFlags flags)
3459 {
3460     if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3461         return -EINVAL;
3462     }
3463 
3464     return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3465                               nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3466 }
3467 
3468 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3469     int nb_sectors, QEMUIOVector *qiov)
3470 {
3471     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3472 
3473     return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3474 }
3475 
3476 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3477                                       int64_t sector_num, int nb_sectors,
3478                                       BdrvRequestFlags flags)
3479 {
3480     trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3481 
3482     if (!(bs->open_flags & BDRV_O_UNMAP)) {
3483         flags &= ~BDRV_REQ_MAY_UNMAP;
3484     }
3485 
3486     return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3487                              BDRV_REQ_ZERO_WRITE | flags);
3488 }
3489 
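/* Illustrative sketch: zeroing the first megabyte of an image from
 * coroutine context, letting the driver unmap the range if it can.
 * The helper is hypothetical and only demonstrates the call above.
 */
static int coroutine_fn example_zero_first_mb(BlockDriverState *bs)
{
    int nb_sectors = (1 << 20) >> BDRV_SECTOR_BITS;  /* 1 MiB in sectors */

    return bdrv_co_write_zeroes(bs, 0, nb_sectors, BDRV_REQ_MAY_UNMAP);
}
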
3490 /**
3491  * Truncate file to 'offset' bytes (needed only for file protocols)
3492  */
3493 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3494 {
3495     BlockDriver *drv = bs->drv;
3496     int ret;
3497     if (!drv)
3498         return -ENOMEDIUM;
3499     if (!drv->bdrv_truncate)
3500         return -ENOTSUP;
3501     if (bs->read_only)
3502         return -EACCES;
3503 
3504     ret = drv->bdrv_truncate(bs, offset);
3505     if (ret == 0) {
3506         ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3507         if (bs->blk) {
3508             blk_dev_resize_cb(bs->blk);
3509         }
3510     }
3511     return ret;
3512 }
3513 
3514 /**
3515  * Length of an allocated file in bytes. Sparse files are counted by actual
3516  * allocated space. Returns < 0 on error or if unknown.
3517  */
3518 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3519 {
3520     BlockDriver *drv = bs->drv;
3521     if (!drv) {
3522         return -ENOMEDIUM;
3523     }
3524     if (drv->bdrv_get_allocated_file_size) {
3525         return drv->bdrv_get_allocated_file_size(bs);
3526     }
3527     if (bs->file) {
3528         return bdrv_get_allocated_file_size(bs->file);
3529     }
3530     return -ENOTSUP;
3531 }
3532 
3533 /**
3534  * Return number of sectors on success, -errno on error.
3535  */
3536 int64_t bdrv_nb_sectors(BlockDriverState *bs)
3537 {
3538     BlockDriver *drv = bs->drv;
3539 
3540     if (!drv)
3541         return -ENOMEDIUM;
3542 
3543     if (drv->has_variable_length) {
3544         int ret = refresh_total_sectors(bs, bs->total_sectors);
3545         if (ret < 0) {
3546             return ret;
3547         }
3548     }
3549     return bs->total_sectors;
3550 }
3551 
3552 /**
3553  * Return length in bytes on success, -errno on error.
3554  * The length is always a multiple of BDRV_SECTOR_SIZE.
3555  */
3556 int64_t bdrv_getlength(BlockDriverState *bs)
3557 {
3558     int64_t ret = bdrv_nb_sectors(bs);
3559 
3560     return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
3561 }
3562 
3563 /* Store the sector count in *nb_sectors_ptr; 0 if no device is present or on error */
3564 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3565 {
3566     int64_t nb_sectors = bdrv_nb_sectors(bs);
3567 
3568     *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
3569 }
3570 
3571 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3572                        BlockdevOnError on_write_error)
3573 {
3574     bs->on_read_error = on_read_error;
3575     bs->on_write_error = on_write_error;
3576 }
3577 
3578 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3579 {
3580     return is_read ? bs->on_read_error : bs->on_write_error;
3581 }
3582 
3583 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3584 {
3585     BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3586 
3587     switch (on_err) {
3588     case BLOCKDEV_ON_ERROR_ENOSPC:
3589         return (error == ENOSPC) ?
3590                BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
3591     case BLOCKDEV_ON_ERROR_STOP:
3592         return BLOCK_ERROR_ACTION_STOP;
3593     case BLOCKDEV_ON_ERROR_REPORT:
3594         return BLOCK_ERROR_ACTION_REPORT;
3595     case BLOCKDEV_ON_ERROR_IGNORE:
3596         return BLOCK_ERROR_ACTION_IGNORE;
3597     default:
3598         abort();
3599     }
3600 }
3601 
3602 static void send_qmp_error_event(BlockDriverState *bs,
3603                                  BlockErrorAction action,
3604                                  bool is_read, int error)
3605 {
3606     IoOperationType optype;
3607 
3608     optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
3609     qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
3610                                    bdrv_iostatus_is_enabled(bs),
3611                                    error == ENOSPC, strerror(error),
3612                                    &error_abort);
3613 }
3614 
3615 /* This is done by device models because, while the block layer knows
3616  * about the error, it does not know whether an operation comes from
3617  * the device or the block layer (from a job, for example).
3618  */
3619 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3620                        bool is_read, int error)
3621 {
3622     assert(error >= 0);
3623 
3624     if (action == BLOCK_ERROR_ACTION_STOP) {
3625         /* First set the iostatus, so that "info block" returns an iostatus
3626          * that matches the events raised so far (an additional error iostatus
3627          * is fine, but not a lost one).
3628          */
3629         bdrv_iostatus_set_err(bs, error);
3630 
3631         /* Then raise the request to stop the VM and the event.
3632          * qemu_system_vmstop_request_prepare has two effects.  First,
3633          * it ensures that the STOP event always comes after the
3634          * BLOCK_IO_ERROR event.  Second, it ensures that even if management
3635          * can observe the STOP event and do a "cont" before the STOP
3636          * event is issued, the VM will not stop.  In this case, vm_start()
3637          * also ensures that the STOP/RESUME pair of events is emitted.
3638          */
3639         qemu_system_vmstop_request_prepare();
3640         send_qmp_error_event(bs, action, is_read, error);
3641         qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
3642     } else {
3643         send_qmp_error_event(bs, action, is_read, error);
3644     }
3645 }
3646 
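/* Illustrative sketch of the calling convention documented above: a
 * device model maps the errno of a failed write to an action with
 * bdrv_get_error_action() and then reports it through
 * bdrv_error_action().  The helper is hypothetical.
 */
static void example_handle_write_error(BlockDriverState *bs, int error)
{
    /* 'error' must be a positive errno, as bdrv_error_action() asserts */
    BlockErrorAction action = bdrv_get_error_action(bs, false, error);

    /* A real device model would queue the failed request for a retry
     * before reporting BLOCK_ERROR_ACTION_STOP. */
    bdrv_error_action(bs, action, false, error);
}
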
3647 int bdrv_is_read_only(BlockDriverState *bs)
3648 {
3649     return bs->read_only;
3650 }
3651 
3652 int bdrv_is_sg(BlockDriverState *bs)
3653 {
3654     return bs->sg;
3655 }
3656 
3657 int bdrv_enable_write_cache(BlockDriverState *bs)
3658 {
3659     return bs->enable_write_cache;
3660 }
3661 
3662 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3663 {
3664     bs->enable_write_cache = wce;
3665 
3666     /* so a reopen() will preserve wce */
3667     if (wce) {
3668         bs->open_flags |= BDRV_O_CACHE_WB;
3669     } else {
3670         bs->open_flags &= ~BDRV_O_CACHE_WB;
3671     }
3672 }
3673 
3674 int bdrv_is_encrypted(BlockDriverState *bs)
3675 {
3676     if (bs->backing_hd && bs->backing_hd->encrypted)
3677         return 1;
3678     return bs->encrypted;
3679 }
3680 
3681 int bdrv_key_required(BlockDriverState *bs)
3682 {
3683     BlockDriverState *backing_hd = bs->backing_hd;
3684 
3685     if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3686         return 1;
3687     return (bs->encrypted && !bs->valid_key);
3688 }
3689 
3690 int bdrv_set_key(BlockDriverState *bs, const char *key)
3691 {
3692     int ret;
3693     if (bs->backing_hd && bs->backing_hd->encrypted) {
3694         ret = bdrv_set_key(bs->backing_hd, key);
3695         if (ret < 0)
3696             return ret;
3697         if (!bs->encrypted)
3698             return 0;
3699     }
3700     if (!bs->encrypted) {
3701         return -EINVAL;
3702     } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3703         return -ENOMEDIUM;
3704     }
3705     ret = bs->drv->bdrv_set_key(bs, key);
3706     if (ret < 0) {
3707         bs->valid_key = 0;
3708     } else if (!bs->valid_key) {
3709         bs->valid_key = 1;
3710         if (bs->blk) {
3711             /* call the change callback now, we skipped it on open */
3712             blk_dev_change_media_cb(bs->blk, true);
3713         }
3714     }
3715     return ret;
3716 }
3717 
3718 const char *bdrv_get_format_name(BlockDriverState *bs)
3719 {
3720     return bs->drv ? bs->drv->format_name : NULL;
3721 }
3722 
3723 static int qsort_strcmp(const void *a, const void *b)
3724 {
3725     return strcmp(a, b);
3726 }
3727 
3728 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3729                          void *opaque)
3730 {
3731     BlockDriver *drv;
3732     int count = 0;
3733     int i;
3734     const char **formats = NULL;
3735 
3736     QLIST_FOREACH(drv, &bdrv_drivers, list) {
3737         if (drv->format_name) {
3738             bool found = false;
3739             int i = count;
3740             while (formats && i && !found) {
3741                 found = !strcmp(formats[--i], drv->format_name);
3742             }
3743 
3744             if (!found) {
3745                 formats = g_renew(const char *, formats, count + 1);
3746                 formats[count++] = drv->format_name;
3747             }
3748         }
3749     }
3750 
3751     qsort(formats, count, sizeof(formats[0]), qsort_strcmp);
3752 
3753     for (i = 0; i < count; i++) {
3754         it(opaque, formats[i]);
3755     }
3756 
3757     g_free(formats);
3758 }
3759 
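/* Illustrative sketch: listing every registered format name through
 * bdrv_iterate_format().  The callback signature matches the 'it'
 * parameter above; both helpers are hypothetical.
 */
static void example_print_format(void *opaque, const char *name)
{
    printf("%s\n", name);
}

static void example_list_formats(void)
{
    bdrv_iterate_format(example_print_format, NULL);
}
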
3760 /* Find the BlockDriverState attached to the named BlockBackend */
3761 /* TODO convert callers to blk_by_name(), then remove */
3762 BlockDriverState *bdrv_find(const char *name)
3763 {
3764     BlockBackend *blk = blk_by_name(name);
3765 
3766     return blk ? blk_bs(blk) : NULL;
3767 }
3768 
3769 /* Find a node in the graph of BlockDriverStates by its node name */
3770 BlockDriverState *bdrv_find_node(const char *node_name)
3771 {
3772     BlockDriverState *bs;
3773 
3774     assert(node_name);
3775 
3776     QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3777         if (!strcmp(node_name, bs->node_name)) {
3778             return bs;
3779         }
3780     }
3781     return NULL;
3782 }
3783 
3784 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3785 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3786 {
3787     BlockDeviceInfoList *list, *entry;
3788     BlockDriverState *bs;
3789 
3790     list = NULL;
3791     QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3792         entry = g_malloc0(sizeof(*entry));
3793         entry->value = bdrv_block_device_info(bs);
3794         entry->next = list;
3795         list = entry;
3796     }
3797 
3798     return list;
3799 }
3800 
3801 BlockDriverState *bdrv_lookup_bs(const char *device,
3802                                  const char *node_name,
3803                                  Error **errp)
3804 {
3805     BlockBackend *blk;
3806     BlockDriverState *bs;
3807 
3808     if (device) {
3809         blk = blk_by_name(device);
3810 
3811         if (blk) {
3812             return blk_bs(blk);
3813         }
3814     }
3815 
3816     if (node_name) {
3817         bs = bdrv_find_node(node_name);
3818 
3819         if (bs) {
3820             return bs;
3821         }
3822     }
3823 
3824     error_setg(errp, "Cannot find device=%s nor node_name=%s",
3825                      device ? device : "",
3826                      node_name ? node_name : "");
3827     return NULL;
3828 }
3829 
3830 /* If 'base' is in the same chain as 'top', return true. Otherwise,
3831  * return false.  If either argument is NULL, return false. */
3832 bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
3833 {
3834     while (top && top != base) {
3835         top = top->backing_hd;
3836     }
3837 
3838     return top != NULL;
3839 }
3840 
3841 BlockDriverState *bdrv_next_node(BlockDriverState *bs)
3842 {
3843     if (!bs) {
3844         return QTAILQ_FIRST(&graph_bdrv_states);
3845     }
3846     return QTAILQ_NEXT(bs, node_list);
3847 }
3848 
3849 BlockDriverState *bdrv_next(BlockDriverState *bs)
3850 {
3851     if (!bs) {
3852         return QTAILQ_FIRST(&bdrv_states);
3853     }
3854     return QTAILQ_NEXT(bs, device_list);
3855 }
3856 
3857 const char *bdrv_get_node_name(const BlockDriverState *bs)
3858 {
3859     return bs->node_name;
3860 }
3861 
3862 /* TODO check what callers really want: bs->node_name or blk_name() */
3863 const char *bdrv_get_device_name(const BlockDriverState *bs)
3864 {
3865     return bs->blk ? blk_name(bs->blk) : "";
3866 }
3867 
3868 int bdrv_get_flags(BlockDriverState *bs)
3869 {
3870     return bs->open_flags;
3871 }
3872 
3873 int bdrv_flush_all(void)
3874 {
3875     BlockDriverState *bs;
3876     int result = 0;
3877 
3878     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3879         AioContext *aio_context = bdrv_get_aio_context(bs);
3880         int ret;
3881 
3882         aio_context_acquire(aio_context);
3883         ret = bdrv_flush(bs);
3884         if (ret < 0 && !result) {
3885             result = ret;
3886         }
3887         aio_context_release(aio_context);
3888     }
3889 
3890     return result;
3891 }
3892 
3893 int bdrv_has_zero_init_1(BlockDriverState *bs)
3894 {
3895     return 1;
3896 }
3897 
3898 int bdrv_has_zero_init(BlockDriverState *bs)
3899 {
3900     assert(bs->drv);
3901 
3902     /* If BS is a copy-on-write image, it is initialized to
3903        the contents of the base image, which may not be zeroes.  */
3904     if (bs->backing_hd) {
3905         return 0;
3906     }
3907     if (bs->drv->bdrv_has_zero_init) {
3908         return bs->drv->bdrv_has_zero_init(bs);
3909     }
3910 
3911     /* safe default */
3912     return 0;
3913 }
3914 
3915 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3916 {
3917     BlockDriverInfo bdi;
3918 
3919     if (bs->backing_hd) {
3920         return false;
3921     }
3922 
3923     if (bdrv_get_info(bs, &bdi) == 0) {
3924         return bdi.unallocated_blocks_are_zero;
3925     }
3926 
3927     return false;
3928 }
3929 
3930 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3931 {
3932     BlockDriverInfo bdi;
3933 
3934     if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3935         return false;
3936     }
3937 
3938     if (bdrv_get_info(bs, &bdi) == 0) {
3939         return bdi.can_write_zeroes_with_unmap;
3940     }
3941 
3942     return false;
3943 }
3944 
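/* Illustrative sketch: a caller (a block job, for instance) can combine
 * the query above with the write-zero request flags, asking for an
 * unmapping zero write only when the driver can honour it.
 * Hypothetical helper.
 */
static BdrvRequestFlags example_zero_flags(BlockDriverState *bs)
{
    return bdrv_can_write_zeroes_with_unmap(bs) ? BDRV_REQ_MAY_UNMAP : 0;
}
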
3945 typedef struct BdrvCoGetBlockStatusData {
3946     BlockDriverState *bs;
3947     BlockDriverState *base;
3948     int64_t sector_num;
3949     int nb_sectors;
3950     int *pnum;
3951     int64_t ret;
3952     bool done;
3953 } BdrvCoGetBlockStatusData;
3954 
3955 /*
3956  * Returns the allocation status of the specified sectors.
3957  * Drivers not implementing the functionality are assumed to not support
3958  * backing files, hence all their sectors are reported as allocated.
3959  *
3960  * If 'sector_num' is beyond the end of the disk image the return value is 0
3961  * and 'pnum' is set to 0.
3962  *
3963  * 'pnum' is set to the number of sectors (including and immediately following
3964  * the specified sector) that are known to be in the same
3965  * allocated/unallocated state.
3966  *
3967  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
3968  * beyond the end of the disk image it will be clamped.
3969  */
3970 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3971                                                      int64_t sector_num,
3972                                                      int nb_sectors, int *pnum)
3973 {
3974     int64_t total_sectors;
3975     int64_t n;
3976     int64_t ret, ret2;
3977 
3978     total_sectors = bdrv_nb_sectors(bs);
3979     if (total_sectors < 0) {
3980         return total_sectors;
3981     }
3982 
3983     if (sector_num >= total_sectors) {
3984         *pnum = 0;
3985         return 0;
3986     }
3987 
3988     n = total_sectors - sector_num;
3989     if (n < nb_sectors) {
3990         nb_sectors = n;
3991     }
3992 
3993     if (!bs->drv->bdrv_co_get_block_status) {
3994         *pnum = nb_sectors;
3995         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
3996         if (bs->drv->protocol_name) {
3997             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3998         }
3999         return ret;
4000     }
4001 
4002     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
4003     if (ret < 0) {
4004         *pnum = 0;
4005         return ret;
4006     }
4007 
4008     if (ret & BDRV_BLOCK_RAW) {
4009         assert(ret & BDRV_BLOCK_OFFSET_VALID);
4010         return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4011                                      *pnum, pnum);
4012     }
4013 
4014     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
4015         ret |= BDRV_BLOCK_ALLOCATED;
4016     }
4017 
4018     if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
4019         if (bdrv_unallocated_blocks_are_zero(bs)) {
4020             ret |= BDRV_BLOCK_ZERO;
4021         } else if (bs->backing_hd) {
4022             BlockDriverState *bs2 = bs->backing_hd;
4023             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
4024             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
4025                 ret |= BDRV_BLOCK_ZERO;
4026             }
4027         }
4028     }
4029 
4030     if (bs->file &&
4031         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
4032         (ret & BDRV_BLOCK_OFFSET_VALID)) {
4033         int file_pnum;
4034 
4035         ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4036                                         *pnum, &file_pnum);
4037         if (ret2 >= 0) {
4038             /* Ignore errors.  This is just providing extra information, it
4039             /* Ignore errors.  This is just providing extra information; it
4040              */
4041             if (!file_pnum) {
4042                 /* !file_pnum indicates an offset at or beyond the EOF; it is
4043                  * perfectly valid for the format block driver to point to such
4044                  * offsets, so catch it and mark everything as zero */
4045                 ret |= BDRV_BLOCK_ZERO;
4046             } else {
4047                 /* Limit request to the range reported by the protocol driver */
4048                 *pnum = file_pnum;
4049                 ret |= (ret2 & BDRV_BLOCK_ZERO);
4050             }
4051         }
4052     }
4053 
4054     return ret;
4055 }
4056 
4057 /* Coroutine wrapper for bdrv_get_block_status() */
4058 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4059 {
4060     BdrvCoGetBlockStatusData *data = opaque;
4061     BlockDriverState *bs = data->bs;
4062 
4063     data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4064                                          data->pnum);
4065     data->done = true;
4066 }
4067 
4068 /*
4069  * Synchronous wrapper around bdrv_co_get_block_status().
4070  *
4071  * See bdrv_co_get_block_status() for details.
4072  */
4073 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4074                               int nb_sectors, int *pnum)
4075 {
4076     Coroutine *co;
4077     BdrvCoGetBlockStatusData data = {
4078         .bs = bs,
4079         .sector_num = sector_num,
4080         .nb_sectors = nb_sectors,
4081         .pnum = pnum,
4082         .done = false,
4083     };
4084 
4085     if (qemu_in_coroutine()) {
4086         /* Fast-path if already in coroutine context */
4087         bdrv_get_block_status_co_entry(&data);
4088     } else {
4089         AioContext *aio_context = bdrv_get_aio_context(bs);
4090 
4091         co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4092         qemu_coroutine_enter(co, &data);
4093         while (!data.done) {
4094             aio_poll(aio_context, true);
4095         }
4096     }
4097     return data.ret;
4098 }
4099 
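/* Illustrative sketch: walking an image and classifying each extent via
 * the synchronous wrapper above.  'pnum' reports how many sectors share
 * the returned state, so the loop advances in variable-sized steps.
 * Hypothetical helper for documentation only.
 */
static void example_dump_block_status(BlockDriverState *bs)
{
    int64_t total = bdrv_nb_sectors(bs);
    int64_t sector_num = 0;

    while (sector_num < total) {
        int pnum;
        /* a request reaching beyond EOF is clamped internally */
        int64_t ret = bdrv_get_block_status(bs, sector_num, 65536, &pnum);

        if (ret < 0 || pnum == 0) {
            break;
        }
        printf("%" PRId64 "+%d:%s%s\n", sector_num, pnum,
               ret & BDRV_BLOCK_DATA ? " data" : "",
               ret & BDRV_BLOCK_ZERO ? " zero" : "");
        sector_num += pnum;
    }
}
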
4100 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4101                                    int nb_sectors, int *pnum)
4102 {
4103     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4104     if (ret < 0) {
4105         return ret;
4106     }
4107     return !!(ret & BDRV_BLOCK_ALLOCATED);
4108 }
4109 
4110 /*
4111  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4112  *
4113  * Return true if the given sector is allocated in any image between
4114  * BASE and TOP (inclusive).  BASE can be NULL to check if the given
4115  * sector is allocated in any image of the chain.  Return false otherwise.
4116  *
4117  * 'pnum' is set to the number of sectors (including and immediately following
4118  *  the specified sector) that are known to be in the same
4119  *  allocated/unallocated state.
4120  *
4121  */
4122 int bdrv_is_allocated_above(BlockDriverState *top,
4123                             BlockDriverState *base,
4124                             int64_t sector_num,
4125                             int nb_sectors, int *pnum)
4126 {
4127     BlockDriverState *intermediate;
4128     int ret, n = nb_sectors;
4129 
4130     intermediate = top;
4131     while (intermediate && intermediate != base) {
4132         int pnum_inter;
4133         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4134                                 &pnum_inter);
4135         if (ret < 0) {
4136             return ret;
4137         } else if (ret) {
4138             *pnum = pnum_inter;
4139             return 1;
4140         }
4141 
4142         /*
4143          * [sector_num, nb_sectors] is unallocated on top but intermediate
4144          * might have
4145          *
4146          * [sector_num+x, nb_sectors] allocated.
4147          */
4148         if (n > pnum_inter &&
4149             (intermediate == top ||
4150              sector_num + pnum_inter < intermediate->total_sectors)) {
4151             n = pnum_inter;
4152         }
4153 
4154         intermediate = intermediate->backing_hd;
4155     }
4156 
4157     *pnum = n;
4158     return 0;
4159 }
4160 
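/* Illustrative sketch: testing whether a sector range carries data
 * anywhere in the whole chain.  Passing base == NULL makes
 * bdrv_is_allocated_above() walk to the end of the backing chain; a
 * negative return value is an error.  Hypothetical helper.
 */
static int example_chain_has_data(BlockDriverState *top, int64_t sector_num,
                                  int nb_sectors)
{
    int pnum;

    return bdrv_is_allocated_above(top, NULL, sector_num, nb_sectors, &pnum);
}
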
4161 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4162 {
4163     if (bs->backing_hd && bs->backing_hd->encrypted)
4164         return bs->backing_file;
4165     else if (bs->encrypted)
4166         return bs->filename;
4167     else
4168         return NULL;
4169 }
4170 
4171 void bdrv_get_backing_filename(BlockDriverState *bs,
4172                                char *filename, int filename_size)
4173 {
4174     pstrcpy(filename, filename_size, bs->backing_file);
4175 }
4176 
4177 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4178                           const uint8_t *buf, int nb_sectors)
4179 {
4180     BlockDriver *drv = bs->drv;
4181     if (!drv)
4182         return -ENOMEDIUM;
4183     if (!drv->bdrv_write_compressed)
4184         return -ENOTSUP;
4185     if (bdrv_check_request(bs, sector_num, nb_sectors))
4186         return -EIO;
4187 
4188     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4189 
4190     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4191 }
4192 
4193 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4194 {
4195     BlockDriver *drv = bs->drv;
4196     if (!drv)
4197         return -ENOMEDIUM;
4198     if (!drv->bdrv_get_info)
4199         return -ENOTSUP;
4200     memset(bdi, 0, sizeof(*bdi));
4201     return drv->bdrv_get_info(bs, bdi);
4202 }
4203 
4204 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4205 {
4206     BlockDriver *drv = bs->drv;
4207     if (drv && drv->bdrv_get_specific_info) {
4208         return drv->bdrv_get_specific_info(bs);
4209     }
4210     return NULL;
4211 }
4212 
4213 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4214                       int64_t pos, int size)
4215 {
4216     QEMUIOVector qiov;
4217     struct iovec iov = {
4218         .iov_base   = (void *) buf,
4219         .iov_len    = size,
4220     };
4221 
4222     qemu_iovec_init_external(&qiov, &iov, 1);
4223     return bdrv_writev_vmstate(bs, &qiov, pos);
4224 }
4225 
4226 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4227 {
4228     BlockDriver *drv = bs->drv;
4229 
4230     if (!drv) {
4231         return -ENOMEDIUM;
4232     } else if (drv->bdrv_save_vmstate) {
4233         return drv->bdrv_save_vmstate(bs, qiov, pos);
4234     } else if (bs->file) {
4235         return bdrv_writev_vmstate(bs->file, qiov, pos);
4236     }
4237 
4238     return -ENOTSUP;
4239 }
4240 
4241 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4242                       int64_t pos, int size)
4243 {
4244     BlockDriver *drv = bs->drv;
4245     if (!drv)
4246         return -ENOMEDIUM;
4247     if (drv->bdrv_load_vmstate)
4248         return drv->bdrv_load_vmstate(bs, buf, pos, size);
4249     if (bs->file)
4250         return bdrv_load_vmstate(bs->file, buf, pos, size);
4251     return -ENOTSUP;
4252 }
4253 
4254 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4255 {
4256     if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4257         return;
4258     }
4259 
4260     bs->drv->bdrv_debug_event(bs, event);
4261 }
4262 
4263 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4264                           const char *tag)
4265 {
4266     while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4267         bs = bs->file;
4268     }
4269 
4270     if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4271         return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4272     }
4273 
4274     return -ENOTSUP;
4275 }
4276 
4277 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4278 {
4279     while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4280         bs = bs->file;
4281     }
4282 
4283     if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4284         return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4285     }
4286 
4287     return -ENOTSUP;
4288 }
4289 
4290 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4291 {
4292     while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4293         bs = bs->file;
4294     }
4295 
4296     if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4297         return bs->drv->bdrv_debug_resume(bs, tag);
4298     }
4299 
4300     return -ENOTSUP;
4301 }
4302 
4303 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4304 {
4305     while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4306         bs = bs->file;
4307     }
4308 
4309     if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4310         return bs->drv->bdrv_debug_is_suspended(bs, tag);
4311     }
4312 
4313     return false;
4314 }
4315 
4316 int bdrv_is_snapshot(BlockDriverState *bs)
4317 {
4318     return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4319 }
4320 
4321 /* backing_file can either be relative, or absolute, or a protocol.  If it is
4322  * relative, it must be relative to the chain.  So, passing in bs->filename
4323  * from a BDS as backing_file should not be done, as that may be relative to
4324  * the CWD rather than the chain. */
4325 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4326         const char *backing_file)
4327 {
4328     char *filename_full = NULL;
4329     char *backing_file_full = NULL;
4330     char *filename_tmp = NULL;
4331     int is_protocol = 0;
4332     BlockDriverState *curr_bs = NULL;
4333     BlockDriverState *retval = NULL;
4334 
4335     if (!bs || !bs->drv || !backing_file) {
4336         return NULL;
4337     }
4338 
4339     filename_full     = g_malloc(PATH_MAX);
4340     backing_file_full = g_malloc(PATH_MAX);
4341     filename_tmp      = g_malloc(PATH_MAX);
4342 
4343     is_protocol = path_has_protocol(backing_file);
4344 
4345     for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4346 
4347         /* If either of the filename paths is actually a protocol, then
4348          * compare unmodified paths; otherwise make paths relative */
4349         if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4350             if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4351                 retval = curr_bs->backing_hd;
4352                 break;
4353             }
4354         } else {
4355             /* If not an absolute filename path, make it relative to the current
4356              * image's filename path */
4357             path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4358                          backing_file);
4359 
4360             /* We are going to compare absolute pathnames */
4361             if (!realpath(filename_tmp, filename_full)) {
4362                 continue;
4363             }
4364 
4365             /* We need to make sure the backing filename we are comparing against
4366              * is relative to the current image filename (or absolute) */
4367             path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4368                          curr_bs->backing_file);
4369 
4370             if (!realpath(filename_tmp, backing_file_full)) {
4371                 continue;
4372             }
4373 
4374             if (strcmp(backing_file_full, filename_full) == 0) {
4375                 retval = curr_bs->backing_hd;
4376                 break;
4377             }
4378         }
4379     }
4380 
4381     g_free(filename_full);
4382     g_free(backing_file_full);
4383     g_free(filename_tmp);
4384     return retval;
4385 }
4386 
4387 int bdrv_get_backing_file_depth(BlockDriverState *bs)
4388 {
4389     if (!bs->drv) {
4390         return 0;
4391     }
4392 
4393     if (!bs->backing_hd) {
4394         return 0;
4395     }
4396 
4397     return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4398 }
4399 
4400 /**************************************************************/
4401 /* async I/Os */
4402 
4403 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4404                            QEMUIOVector *qiov, int nb_sectors,
4405                            BlockCompletionFunc *cb, void *opaque)
4406 {
4407     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4408 
4409     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4410                                  cb, opaque, false);
4411 }
4412 
4413 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4414                             QEMUIOVector *qiov, int nb_sectors,
4415                             BlockCompletionFunc *cb, void *opaque)
4416 {
4417     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4418 
4419     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4420                                  cb, opaque, true);
4421 }
4422 
4423 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4424         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4425         BlockCompletionFunc *cb, void *opaque)
4426 {
4427     trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4428 
4429     return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4430                                  BDRV_REQ_ZERO_WRITE | flags,
4431                                  cb, opaque, true);
4432 }
4433 
4434 
4435 typedef struct MultiwriteCB {
4436     int error;
4437     int num_requests;
4438     int num_callbacks;
4439     struct {
4440         BlockCompletionFunc *cb;
4441         void *opaque;
4442         QEMUIOVector *free_qiov;
4443     } callbacks[];
4444 } MultiwriteCB;
4445 
4446 static void multiwrite_user_cb(MultiwriteCB *mcb)
4447 {
4448     int i;
4449 
4450     for (i = 0; i < mcb->num_callbacks; i++) {
4451         mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4452         if (mcb->callbacks[i].free_qiov) {
4453             qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4454         }
4455         g_free(mcb->callbacks[i].free_qiov);
4456     }
4457 }
4458 
4459 static void multiwrite_cb(void *opaque, int ret)
4460 {
4461     MultiwriteCB *mcb = opaque;
4462 
4463     trace_multiwrite_cb(mcb, ret);
4464 
4465     if (ret < 0 && !mcb->error) {
4466         mcb->error = ret;
4467     }
4468 
4469     mcb->num_requests--;
4470     if (mcb->num_requests == 0) {
4471         multiwrite_user_cb(mcb);
4472         g_free(mcb);
4473     }
4474 }
4475 
4476 static int multiwrite_req_compare(const void *a, const void *b)
4477 {
4478     const BlockRequest *req1 = a, *req2 = b;
4479 
4480     /*
4481      * Note that we can't simply subtract req2->sector from req1->sector
4482      * here as that could overflow the return value.
4483      */
4484     if (req1->sector > req2->sector) {
4485         return 1;
4486     } else if (req1->sector < req2->sector) {
4487         return -1;
4488     } else {
4489         return 0;
4490     }
4491 }
4492 
4493 /*
4494  * Takes a bunch of requests and tries to merge them. Returns the number of
4495  * requests that remain after merging.
4496  */
4497 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4498     int num_reqs, MultiwriteCB *mcb)
4499 {
4500     int i, outidx;
4501 
4502     // Sort requests by start sector
4503     qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4504 
4505     // Check if adjacent requests are sequential or overlapping. If so,
4506     // combine them into a single request.
4507     outidx = 0;
4508     for (i = 1; i < num_reqs; i++) {
4509         int merge = 0;
4510         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4511 
4512         // Handle exactly sequential writes and overlapping writes.
4513         if (reqs[i].sector <= oldreq_last) {
4514             merge = 1;
4515         }
4516 
4517         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4518             merge = 0;
4519         }
4520 
4521         if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
4522             reqs[i].nb_sectors > bs->bl.max_transfer_length) {
4523             merge = 0;
4524         }
4525 
4526         if (merge) {
4527             size_t size;
4528             QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4529             qemu_iovec_init(qiov,
4530                 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4531 
4532             // Add the first request to the merged one. If the requests are
4533             // overlapping, drop the last sectors of the first request.
4534             size = (reqs[i].sector - reqs[outidx].sector) << 9;
4535             qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4536 
4537             // We should not need to add any zeros between the two requests
4538             assert (reqs[i].sector <= oldreq_last);
4539 
4540             // Add the second request
4541             qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4542 
4543             // Add tail of first request, if necessary
4544             if (qiov->size < reqs[outidx].qiov->size) {
4545                 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
4546                                   reqs[outidx].qiov->size - qiov->size);
4547             }
4548 
4549             reqs[outidx].nb_sectors = qiov->size >> 9;
4550             reqs[outidx].qiov = qiov;
4551 
4552             mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4553         } else {
4554             outidx++;
4555             reqs[outidx].sector     = reqs[i].sector;
4556             reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4557             reqs[outidx].qiov       = reqs[i].qiov;
4558         }
4559     }
4560 
4561     return outidx + 1;
4562 }
4563 
4564 /*
4565  * Submit multiple AIO write requests at once.
4566  *
4567  * On success, the function returns 0 and all requests in the reqs array have
4568  * been submitted. On error, this function returns -1, and any of the
4569  * requests may or may not have been submitted. In particular, this means that the
4570  * callback will be called for some of the requests, for others it won't. The
4571  * caller must check the error field of the BlockRequest to wait for the right
4572  * callbacks (if error != 0, no callback will be called).
4573  *
4574  * The implementation may modify the contents of the reqs array, e.g. to merge
4575  * requests. However, the fields opaque and error are left unmodified as they
4576  * are used to signal failure for a single request to the caller.
4577  */
4578 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4579 {
4580     MultiwriteCB *mcb;
4581     int i;
4582 
4583     /* don't submit writes if we don't have a medium */
4584     if (bs->drv == NULL) {
4585         for (i = 0; i < num_reqs; i++) {
4586             reqs[i].error = -ENOMEDIUM;
4587         }
4588         return -1;
4589     }
4590 
4591     if (num_reqs == 0) {
4592         return 0;
4593     }
4594 
4595     // Create MultiwriteCB structure
4596     mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4597     mcb->num_requests = 0;
4598     mcb->num_callbacks = num_reqs;
4599 
4600     for (i = 0; i < num_reqs; i++) {
4601         mcb->callbacks[i].cb = reqs[i].cb;
4602         mcb->callbacks[i].opaque = reqs[i].opaque;
4603     }
4604 
4605     // Check for mergeable requests
4606     num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4607 
4608     trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4609 
4610     /* Run the aio requests. */
4611     mcb->num_requests = num_reqs;
4612     for (i = 0; i < num_reqs; i++) {
4613         bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4614                               reqs[i].nb_sectors, reqs[i].flags,
4615                               multiwrite_cb, mcb,
4616                               true);
4617     }
4618 
4619     return 0;
4620 }
4621 
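/* Illustrative sketch: batching two writes through bdrv_aio_multiwrite().
 * Each BlockRequest carries its own qiov, callback and opaque pointer;
 * merging may coalesce adjacent requests but leaves the per-request
 * 'opaque' and 'error' fields untouched.  Callback, buffers and the
 * helper itself are hypothetical.
 */
static void example_submit_pair(BlockDriverState *bs, QEMUIOVector *qiov0,
                                QEMUIOVector *qiov1,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlockRequest reqs[2] = {
        { .sector = 0,  .nb_sectors = qiov0->size >> BDRV_SECTOR_BITS,
          .qiov = qiov0, .cb = cb, .opaque = opaque },
        { .sector = 16, .nb_sectors = qiov1->size >> BDRV_SECTOR_BITS,
          .qiov = qiov1, .cb = cb, .opaque = opaque },
    };

    if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
        /* submission failed; reqs[i].error holds the per-request errno */
    }
}
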
4622 void bdrv_aio_cancel(BlockAIOCB *acb)
4623 {
4624     qemu_aio_ref(acb);
4625     bdrv_aio_cancel_async(acb);
4626     while (acb->refcnt > 1) {
4627         if (acb->aiocb_info->get_aio_context) {
4628             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
4629         } else if (acb->bs) {
4630             aio_poll(bdrv_get_aio_context(acb->bs), true);
4631         } else {
4632             abort();
4633         }
4634     }
4635     qemu_aio_unref(acb);
4636 }
4637 
4638 /* Async version of aio cancel. The caller is not blocked if the acb implements
4639  * cancel_async; otherwise we do nothing and let the request complete normally.
4640  * In either case the completion callback must be called. */
4641 void bdrv_aio_cancel_async(BlockAIOCB *acb)
4642 {
4643     if (acb->aiocb_info->cancel_async) {
4644         acb->aiocb_info->cancel_async(acb);
4645     }
4646 }
4647 
4648 /**************************************************************/
4649 /* async block device emulation */
4650 
4651 typedef struct BlockAIOCBSync {
4652     BlockAIOCB common;
4653     QEMUBH *bh;
4654     int ret;
4655     /* vector translation state */
4656     QEMUIOVector *qiov;
4657     uint8_t *bounce;
4658     int is_write;
4659 } BlockAIOCBSync;
4660 
4661 static const AIOCBInfo bdrv_em_aiocb_info = {
4662     .aiocb_size         = sizeof(BlockAIOCBSync),
4663 };
4664 
4665 static void bdrv_aio_bh_cb(void *opaque)
4666 {
4667     BlockAIOCBSync *acb = opaque;
4668 
4669     if (!acb->is_write && acb->ret >= 0) {
4670         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4671     }
4672     qemu_vfree(acb->bounce);
4673     acb->common.cb(acb->common.opaque, acb->ret);
4674     qemu_bh_delete(acb->bh);
4675     acb->bh = NULL;
4676     qemu_aio_unref(acb);
4677 }
4678 
4679 static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4680                                       int64_t sector_num,
4681                                       QEMUIOVector *qiov,
4682                                       int nb_sectors,
4683                                       BlockCompletionFunc *cb,
4684                                       void *opaque,
4685                                       int is_write)
4686 
4687 {
4688     BlockAIOCBSync *acb;
4689 
4690     acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4691     acb->is_write = is_write;
4692     acb->qiov = qiov;
4693     acb->bounce = qemu_try_blockalign(bs, qiov->size);
4694     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
4695 
4696     if (acb->bounce == NULL) {
4697         acb->ret = -ENOMEM;
4698     } else if (is_write) {
4699         qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4700         acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4701     } else {
4702         acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4703     }
4704 
4705     qemu_bh_schedule(acb->bh);
4706 
4707     return &acb->common;
4708 }
4709 
4710 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4711         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4712         BlockCompletionFunc *cb, void *opaque)
4713 {
4714     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4715 }
4716 
4717 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4718         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4719         BlockCompletionFunc *cb, void *opaque)
4720 {
4721     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4722 }
4723 
4724 
4725 typedef struct BlockAIOCBCoroutine {
4726     BlockAIOCB common;
4727     BlockRequest req;
4728     bool is_write;
4729     bool *done;
4730     QEMUBH* bh;
4731 } BlockAIOCBCoroutine;
4732 
4733 static const AIOCBInfo bdrv_em_co_aiocb_info = {
4734     .aiocb_size         = sizeof(BlockAIOCBCoroutine),
4735 };
4736 
4737 static void bdrv_co_em_bh(void *opaque)
4738 {
4739     BlockAIOCBCoroutine *acb = opaque;
4740 
4741     acb->common.cb(acb->common.opaque, acb->req.error);
4742 
4743     qemu_bh_delete(acb->bh);
4744     qemu_aio_unref(acb);
4745 }
4746 
4747 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4748 static void coroutine_fn bdrv_co_do_rw(void *opaque)
4749 {
4750     BlockAIOCBCoroutine *acb = opaque;
4751     BlockDriverState *bs = acb->common.bs;
4752 
4753     if (!acb->is_write) {
4754         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4755             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4756     } else {
4757         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4758             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4759     }
4760 
4761     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4762     qemu_bh_schedule(acb->bh);
4763 }
4764 
4765 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4766                                          int64_t sector_num,
4767                                          QEMUIOVector *qiov,
4768                                          int nb_sectors,
4769                                          BdrvRequestFlags flags,
4770                                          BlockCompletionFunc *cb,
4771                                          void *opaque,
4772                                          bool is_write)
4773 {
4774     Coroutine *co;
4775     BlockAIOCBCoroutine *acb;
4776 
4777     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4778     acb->req.sector = sector_num;
4779     acb->req.nb_sectors = nb_sectors;
4780     acb->req.qiov = qiov;
4781     acb->req.flags = flags;
4782     acb->is_write = is_write;
4783 
4784     co = qemu_coroutine_create(bdrv_co_do_rw);
4785     qemu_coroutine_enter(co, acb);
4786 
4787     return &acb->common;
4788 }
4789 
4790 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4791 {
4792     BlockAIOCBCoroutine *acb = opaque;
4793     BlockDriverState *bs = acb->common.bs;
4794 
4795     acb->req.error = bdrv_co_flush(bs);
4796     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4797     qemu_bh_schedule(acb->bh);
4798 }
4799 
4800 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4801         BlockCompletionFunc *cb, void *opaque)
4802 {
4803     trace_bdrv_aio_flush(bs, opaque);
4804 
4805     Coroutine *co;
4806     BlockAIOCBCoroutine *acb;
4807 
4808     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4809 
4810     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4811     qemu_coroutine_enter(co, acb);
4812 
4813     return &acb->common;
4814 }
4815 
4816 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4817 {
4818     BlockAIOCBCoroutine *acb = opaque;
4819     BlockDriverState *bs = acb->common.bs;
4820 
4821     acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4822     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4823     qemu_bh_schedule(acb->bh);
4824 }
4825 
4826 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4827         int64_t sector_num, int nb_sectors,
4828         BlockCompletionFunc *cb, void *opaque)
4829 {
4830     Coroutine *co;
4831     BlockAIOCBCoroutine *acb;
4832 
4833     trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4834 
4835     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4836     acb->req.sector = sector_num;
4837     acb->req.nb_sectors = nb_sectors;
4838     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4839     qemu_coroutine_enter(co, acb);
4840 
4841     return &acb->common;
4842 }
4843 
4844 void bdrv_init(void)
4845 {
4846     module_call_init(MODULE_INIT_BLOCK);
4847 }
4848 
4849 void bdrv_init_with_whitelist(void)
4850 {
4851     use_bdrv_whitelist = 1;
4852     bdrv_init();
4853 }
4854 
4855 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
4856                    BlockCompletionFunc *cb, void *opaque)
4857 {
4858     BlockAIOCB *acb;
4859 
4860     acb = g_slice_alloc(aiocb_info->aiocb_size);
4861     acb->aiocb_info = aiocb_info;
4862     acb->bs = bs;
4863     acb->cb = cb;
4864     acb->opaque = opaque;
4865     acb->refcnt = 1;
4866     return acb;
4867 }
4868 
4869 void qemu_aio_ref(void *p)
4870 {
4871     BlockAIOCB *acb = p;
4872     acb->refcnt++;
4873 }
4874 
4875 void qemu_aio_unref(void *p)
4876 {
4877     BlockAIOCB *acb = p;
4878     assert(acb->refcnt > 0);
4879     if (--acb->refcnt == 0) {
4880         g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4881     }
4882 }
4883 
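/* Illustrative sketch: how a driver defines its own AIOCB type.  The
 * struct embeds BlockAIOCB as its first member so that qemu_aio_get()
 * can allocate it from the aiocb_size declared in the AIOCBInfo.  All
 * names are hypothetical.
 */
typedef struct ExampleAIOCB {
    BlockAIOCB common;      /* must be the first member */
    int driver_state;
} ExampleAIOCB;

static const AIOCBInfo example_aiocb_info = {
    .aiocb_size = sizeof(ExampleAIOCB),
};

static BlockAIOCB *example_aio_start(BlockDriverState *bs,
                                     BlockCompletionFunc *cb, void *opaque)
{
    ExampleAIOCB *acb = qemu_aio_get(&example_aiocb_info, bs, cb, opaque);

    acb->driver_state = 0;
    /* ...start the actual I/O here; on completion, invoke
     * acb->common.cb(acb->common.opaque, ret) and drop the reference
     * with qemu_aio_unref(acb). */
    return &acb->common;
}
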
4884 /**************************************************************/
4885 /* Coroutine block device emulation */
4886 
4887 typedef struct CoroutineIOCompletion {
4888     Coroutine *coroutine;
4889     int ret;
4890 } CoroutineIOCompletion;
4891 
4892 static void bdrv_co_io_em_complete(void *opaque, int ret)
4893 {
4894     CoroutineIOCompletion *co = opaque;
4895 
4896     co->ret = ret;
4897     qemu_coroutine_enter(co->coroutine, NULL);
4898 }
4899 
4900 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4901                                       int nb_sectors, QEMUIOVector *iov,
4902                                       bool is_write)
4903 {
4904     CoroutineIOCompletion co = {
4905         .coroutine = qemu_coroutine_self(),
4906     };
4907     BlockAIOCB *acb;
4908 
4909     if (is_write) {
4910         acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4911                                        bdrv_co_io_em_complete, &co);
4912     } else {
4913         acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4914                                       bdrv_co_io_em_complete, &co);
4915     }
4916 
4917     trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
4918     if (!acb) {
4919         return -EIO;
4920     }
4921     qemu_coroutine_yield();
4922 
4923     return co.ret;
4924 }
4925 
4926 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4927                                          int64_t sector_num, int nb_sectors,
4928                                          QEMUIOVector *iov)
4929 {
4930     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4931 }
4932 
4933 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4934                                          int64_t sector_num, int nb_sectors,
4935                                          QEMUIOVector *iov)
4936 {
4937     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4938 }
4939 
4940 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
4941 {
4942     RwCo *rwco = opaque;
4943 
4944     rwco->ret = bdrv_co_flush(rwco->bs);
4945 }
4946 
4947 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4948 {
4949     int ret;
4950 
4951     if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
4952         return 0;
4953     }
4954 
4955     /* Write back cached data to the OS even with cache=unsafe */
4956     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
4957     if (bs->drv->bdrv_co_flush_to_os) {
4958         ret = bs->drv->bdrv_co_flush_to_os(bs);
4959         if (ret < 0) {
4960             return ret;
4961         }
4962     }
4963 
4964     /* But don't actually force it to the disk with cache=unsafe */
4965     if (bs->open_flags & BDRV_O_NO_FLUSH) {
4966         goto flush_parent;
4967     }
4968 
4969     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
4970     if (bs->drv->bdrv_co_flush_to_disk) {
4971         ret = bs->drv->bdrv_co_flush_to_disk(bs);
4972     } else if (bs->drv->bdrv_aio_flush) {
4973         BlockAIOCB *acb;
4974         CoroutineIOCompletion co = {
4975             .coroutine = qemu_coroutine_self(),
4976         };
4977 
4978         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4979         if (acb == NULL) {
4980             ret = -EIO;
4981         } else {
4982             qemu_coroutine_yield();
4983             ret = co.ret;
4984         }
4985     } else {
4986         /*
4987          * Some block drivers always operate in either writethrough or unsafe
4988          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4989          * know how the server works (because the behaviour is hardcoded or
4990          * depends on server-side configuration), so we can't ensure that
4991          * everything is safe on disk. Returning an error doesn't work because
4992          * that would break guests even if the server operates in writethrough
4993          * mode.
4994          *
4995          * Let's hope the user knows what he's doing.
4996          */
4997         ret = 0;
4998     }
4999     if (ret < 0) {
5000         return ret;
5001     }
5002 
5003     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
5004      * in the case of cache=unsafe, so there are no useless flushes.
5005      */
5006 flush_parent:
5007     return bdrv_co_flush(bs->file);
5008 }
5009 
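/* Illustrative sketch of the two driver hooks consulted by
 * bdrv_co_flush() above: bdrv_co_flush_to_os() writes data out of the
 * driver's internal cache to the host OS, and bdrv_co_flush_to_disk()
 * forces it to stable storage.  A format driver without an internal
 * cache can supply a trivial flush_to_os.  Hypothetical driver code.
 */
static int coroutine_fn example_co_flush_to_os(BlockDriverState *bs)
{
    /* nothing cached inside this example driver, so nothing to do */
    return 0;
}
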
5010 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
5011 {
5012     Error *local_err = NULL;
5013     int ret;
5014 
5015     if (!bs->drv)  {
5016         return;
5017     }
5018 
5019     if (!(bs->open_flags & BDRV_O_INCOMING)) {
5020         return;
5021     }
5022     bs->open_flags &= ~BDRV_O_INCOMING;
5023 
5024     if (bs->drv->bdrv_invalidate_cache) {
5025         bs->drv->bdrv_invalidate_cache(bs, &local_err);
5026     } else if (bs->file) {
5027         bdrv_invalidate_cache(bs->file, &local_err);
5028     }
5029     if (local_err) {
5030         error_propagate(errp, local_err);
5031         return;
5032     }
5033 
5034     ret = refresh_total_sectors(bs, bs->total_sectors);
5035     if (ret < 0) {
5036         error_setg_errno(errp, -ret, "Could not refresh total sector count");
5037         return;
5038     }
5039 }
5040 
5041 void bdrv_invalidate_cache_all(Error **errp)
5042 {
5043     BlockDriverState *bs;
5044     Error *local_err = NULL;
5045 
5046     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5047         AioContext *aio_context = bdrv_get_aio_context(bs);
5048 
5049         aio_context_acquire(aio_context);
5050         bdrv_invalidate_cache(bs, &local_err);
5051         aio_context_release(aio_context);
5052         if (local_err) {
5053             error_propagate(errp, local_err);
5054             return;
5055         }
5056     }
5057 }
5058 
5059 int bdrv_flush(BlockDriverState *bs)
5060 {
5061     Coroutine *co;
5062     RwCo rwco = {
5063         .bs = bs,
5064         .ret = NOT_DONE,
5065     };
5066 
5067     if (qemu_in_coroutine()) {
5068         /* Fast-path if already in coroutine context */
5069         bdrv_flush_co_entry(&rwco);
5070     } else {
5071         AioContext *aio_context = bdrv_get_aio_context(bs);
5072 
5073         co = qemu_coroutine_create(bdrv_flush_co_entry);
5074         qemu_coroutine_enter(co, &rwco);
5075         while (rwco.ret == NOT_DONE) {
5076             aio_poll(aio_context, true);
5077         }
5078     }
5079 
5080     return rwco.ret;
5081 }
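
/* Illustrative sketch, not part of the original source: bdrv_flush() hides the
 * coroutine machinery above, so a synchronous caller only needs to check the
 * errno-style return value. The helper name is hypothetical. */
static G_GNUC_UNUSED void example_flush_device(BlockDriverState *bs)
{
    int ret = bdrv_flush(bs);

    if (ret < 0) {
        fprintf(stderr, "flush failed: %s\n", strerror(-ret));
    }
}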
5082 
5083 typedef struct DiscardCo {
5084     BlockDriverState *bs;
5085     int64_t sector_num;
5086     int nb_sectors;
5087     int ret;
5088 } DiscardCo;
5089 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5090 {
5091     DiscardCo *rwco = opaque;
5092 
5093     rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5094 }
5095 
5096 /* If no limit is specified in the BlockLimits, use a default
5097  * of 32768 512-byte sectors (16 MiB) per request.
5098  */
5099 #define MAX_DISCARD_DEFAULT 32768
5100 
5101 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5102                                  int nb_sectors)
5103 {
5104     int max_discard;
5105 
5106     if (!bs->drv) {
5107         return -ENOMEDIUM;
5108     } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5109         return -EIO;
5110     } else if (bs->read_only) {
5111         return -EROFS;
5112     }
5113 
5114     bdrv_reset_dirty(bs, sector_num, nb_sectors);
5115 
5116     /* Do nothing if disabled.  */
5117     if (!(bs->open_flags & BDRV_O_UNMAP)) {
5118         return 0;
5119     }
5120 
5121     if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5122         return 0;
5123     }
5124 
5125     max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5126     while (nb_sectors > 0) {
5127         int ret;
5128         int num = nb_sectors;
5129 
5130         /* align request */
5131         if (bs->bl.discard_alignment &&
5132             num >= bs->bl.discard_alignment &&
5133             sector_num % bs->bl.discard_alignment) {
5134             if (num > bs->bl.discard_alignment) {
5135                 num = bs->bl.discard_alignment;
5136             }
5137             num -= sector_num % bs->bl.discard_alignment;
5138         }
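        /* Worked example (illustrative, not from the original source): with
         * discard_alignment == 8, sector_num == 10 and nb_sectors == 30, num
         * starts at 30, is capped to 8 and then reduced by 10 % 8 == 2, so
         * this chunk covers the 6 sectors 10..15 and the next iteration
         * starts at the aligned sector 16. */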
5139 
5140         /* limit request size */
5141         if (num > max_discard) {
5142             num = max_discard;
5143         }
5144 
5145         if (bs->drv->bdrv_co_discard) {
5146             ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5147         } else {
5148             BlockAIOCB *acb;
5149             CoroutineIOCompletion co = {
5150                 .coroutine = qemu_coroutine_self(),
5151             };
5152 
5153             acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
5154                                             bdrv_co_io_em_complete, &co);
5155             if (acb == NULL) {
5156                 return -EIO;
5157             } else {
5158                 qemu_coroutine_yield();
5159                 ret = co.ret;
5160             }
5161         }
5162         if (ret && ret != -ENOTSUP) {
5163             return ret;
5164         }
5165 
5166         sector_num += num;
5167         nb_sectors -= num;
5168     }
5169     return 0;
5170 }
5171 
5172 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5173 {
5174     Coroutine *co;
5175     DiscardCo rwco = {
5176         .bs = bs,
5177         .sector_num = sector_num,
5178         .nb_sectors = nb_sectors,
5179         .ret = NOT_DONE,
5180     };
5181 
5182     if (qemu_in_coroutine()) {
5183         /* Fast-path if already in coroutine context */
5184         bdrv_discard_co_entry(&rwco);
5185     } else {
5186         AioContext *aio_context = bdrv_get_aio_context(bs);
5187 
5188         co = qemu_coroutine_create(bdrv_discard_co_entry);
5189         qemu_coroutine_enter(co, &rwco);
5190         while (rwco.ret == NOT_DONE) {
5191             aio_poll(aio_context, true);
5192         }
5193     }
5194 
5195     return rwco.ret;
5196 }
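
/* Illustrative sketch, not part of the original source: discarding the first
 * megabyte of a device from synchronous (non-coroutine) context. -ENOTSUP is
 * already swallowed by bdrv_co_discard(), so only real errors surface here.
 * The helper name is hypothetical. */
static G_GNUC_UNUSED void example_discard_first_mb(BlockDriverState *bs)
{
    int ret = bdrv_discard(bs, 0, 2048); /* 2048 512-byte sectors == 1 MiB */

    if (ret < 0) {
        fprintf(stderr, "discard failed: %s\n", strerror(-ret));
    }
}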
5197 
5198 /**************************************************************/
5199 /* removable device support */
5200 
5201 /**
5202  * Return TRUE if the media is present
5203  */
5204 int bdrv_is_inserted(BlockDriverState *bs)
5205 {
5206     BlockDriver *drv = bs->drv;
5207 
5208     if (!drv)
5209         return 0;
5210     if (!drv->bdrv_is_inserted)
5211         return 1;
5212     return drv->bdrv_is_inserted(bs);
5213 }
5214 
5215 /**
5216  * Return whether the media changed since the last call to this
5217  * function, or -ENOTSUP if we don't know.  Most drivers don't know.
5218  */
5219 int bdrv_media_changed(BlockDriverState *bs)
5220 {
5221     BlockDriver *drv = bs->drv;
5222 
5223     if (drv && drv->bdrv_media_changed) {
5224         return drv->bdrv_media_changed(bs);
5225     }
5226     return -ENOTSUP;
5227 }
5228 
5229 /**
5230  * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5231  */
5232 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
5233 {
5234     BlockDriver *drv = bs->drv;
5235     const char *device_name;
5236 
5237     if (drv && drv->bdrv_eject) {
5238         drv->bdrv_eject(bs, eject_flag);
5239     }
5240 
5241     device_name = bdrv_get_device_name(bs);
5242     if (device_name[0] != '\0') {
5243         qapi_event_send_device_tray_moved(device_name,
5244                                           eject_flag, &error_abort);
5245     }
5246 }
5247 
5248 /**
5249  * Lock or unlock the media (if it is locked, the user won't be able
5250  * to eject it manually).
5251  */
5252 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
5253 {
5254     BlockDriver *drv = bs->drv;
5255 
5256     trace_bdrv_lock_medium(bs, locked);
5257 
5258     if (drv && drv->bdrv_lock_medium) {
5259         drv->bdrv_lock_medium(bs, locked);
5260     }
5261 }
5262 
5263 /* needed for generic scsi interface */
5264 
5265 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5266 {
5267     BlockDriver *drv = bs->drv;
5268 
5269     if (drv && drv->bdrv_ioctl)
5270         return drv->bdrv_ioctl(bs, req, buf);
5271     return -ENOTSUP;
5272 }
5273 
5274 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5275         unsigned long int req, void *buf,
5276         BlockCompletionFunc *cb, void *opaque)
5277 {
5278     BlockDriver *drv = bs->drv;
5279 
5280     if (drv && drv->bdrv_aio_ioctl)
5281         return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5282     return NULL;
5283 }
5284 
5285 void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5286 {
5287     bs->guest_block_size = align;
5288 }
5289 
5290 void *qemu_blockalign(BlockDriverState *bs, size_t size)
5291 {
5292     return qemu_memalign(bdrv_opt_mem_align(bs), size);
5293 }
5294 
5295 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
5296 {
5297     return memset(qemu_blockalign(bs, size), 0, size);
5298 }
5299 
5300 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
5301 {
5302     size_t align = bdrv_opt_mem_align(bs);
5303 
5304     /* Ensure that NULL is never returned on success */
5305     assert(align > 0);
5306     if (size == 0) {
5307         size = align;
5308     }
5309 
5310     return qemu_try_memalign(align, size);
5311 }
5312 
5313 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
5314 {
5315     void *mem = qemu_try_blockalign(bs, size);
5316 
5317     if (mem) {
5318         memset(mem, 0, size);
5319     }
5320 
5321     return mem;
5322 }
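
/* Illustrative sketch, not part of the original source: unlike
 * qemu_blockalign(), which aborts on allocation failure, the try-variants
 * return NULL, so callers allocating large bounce buffers can fail
 * gracefully. The buffer size and helper name are arbitrary examples. */
static G_GNUC_UNUSED int example_with_bounce_buffer(BlockDriverState *bs)
{
    void *bounce = qemu_try_blockalign0(bs, 1024 * 1024);

    if (bounce == NULL) {
        return -ENOMEM;
    }
    /* ... perform properly aligned I/O through the bounce buffer ... */
    qemu_vfree(bounce);
    return 0;
}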
5323 
5324 /*
5325  * Check if all memory in this vector is aligned to the required memory
5326  * alignment (bdrv_opt_mem_align()).
5326  */
5327 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5328 {
5329     int i;
5330     size_t alignment = bdrv_opt_mem_align(bs);
5331 
5332     for (i = 0; i < qiov->niov; i++) {
5333         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5334             return false;
5335         }
5336         if (qiov->iov[i].iov_len % alignment) {
5337             return false;
5338         }
5339     }
5340 
5341     return true;
5342 }
5343 
5344 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5345                                           Error **errp)
5346 {
5347     int64_t bitmap_size;
5348     BdrvDirtyBitmap *bitmap;
5349 
5350     assert((granularity & (granularity - 1)) == 0);
5351 
5352     granularity >>= BDRV_SECTOR_BITS;
5353     assert(granularity);
5354     bitmap_size = bdrv_nb_sectors(bs);
5355     if (bitmap_size < 0) {
5356         error_setg_errno(errp, -bitmap_size, "could not get length of device");
5357         errno = -bitmap_size;
5358         return NULL;
5359     }
5360     bitmap = g_new0(BdrvDirtyBitmap, 1);
5361     bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5362     QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5363     return bitmap;
5364 }
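
/* Worked example of the granularity arithmetic above (illustrative, not from
 * the original source): a granularity of 64 KiB becomes 65536 >>
 * BDRV_SECTOR_BITS == 128 sectors, and ffs(128) - 1 == 7, so the HBitmap
 * tracks dirtiness in aligned groups of 2^7 == 128 sectors. */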
5365 
5366 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5367 {
5368     BdrvDirtyBitmap *bm, *next;
5369     QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5370         if (bm == bitmap) {
5371             QLIST_REMOVE(bitmap, list);
5372             hbitmap_free(bitmap->bitmap);
5373             g_free(bitmap);
5374             return;
5375         }
5376     }
5377 }
5378 
5379 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5380 {
5381     BdrvDirtyBitmap *bm;
5382     BlockDirtyInfoList *list = NULL;
5383     BlockDirtyInfoList **plist = &list;
5384 
5385     QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5386         BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1);
5387         BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1);
5388         info->count = bdrv_get_dirty_count(bs, bm);
5389         info->granularity =
5390             ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5391         entry->value = info;
5392         *plist = entry;
5393         plist = &entry->next;
5394     }
5395 
5396     return list;
5397 }
5398 
5399 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5400 {
5401     if (bitmap) {
5402         return hbitmap_get(bitmap->bitmap, sector);
5403     } else {
5404         return 0;
5405     }
5406 }
5407 
5408 void bdrv_dirty_iter_init(BlockDriverState *bs,
5409                           BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5410 {
5411     hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5412 }
5413 
5414 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5415                     int nr_sectors)
5416 {
5417     BdrvDirtyBitmap *bitmap;
5418     QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5419         hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5420     }
5421 }
5422 
5423 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5424 {
5425     BdrvDirtyBitmap *bitmap;
5426     QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5427         hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5428     }
5429 }
5430 
5431 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5432 {
5433     return hbitmap_count(bitmap->bitmap);
5434 }
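
/* Illustrative sketch, not part of the original source: how a consumer such as
 * a backup or mirror job might walk every dirty sector of one bitmap.
 * hbitmap_iter_next() returns -1 once the iterator is exhausted. The helper
 * name is hypothetical. */
static G_GNUC_UNUSED void example_walk_dirty_sectors(BlockDriverState *bs,
                                                     BdrvDirtyBitmap *bitmap)
{
    HBitmapIter hbi;
    int64_t sector;

    bdrv_dirty_iter_init(bs, bitmap, &hbi);
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        /* copy out the data around 'sector' here */
    }
}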
5435 
5436 /* Get a reference to bs */
5437 void bdrv_ref(BlockDriverState *bs)
5438 {
5439     bs->refcnt++;
5440 }
5441 
5442 /* Release a previously grabbed reference to bs.
5443  * If the reference count drops to zero after releasing, the
5444  * BlockDriverState is deleted. */
5445 void bdrv_unref(BlockDriverState *bs)
5446 {
5447     if (!bs) {
5448         return;
5449     }
5450     assert(bs->refcnt > 0);
5451     if (--bs->refcnt == 0) {
5452         bdrv_delete(bs);
5453     }
5454 }
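
/* Illustrative sketch, not part of the original source: callers take an extra
 * reference around operations that may indirectly drop the last other
 * reference, so the BDS cannot be deleted under their feet. The helper name
 * is hypothetical. */
static G_GNUC_UNUSED void example_keep_bs_alive(BlockDriverState *bs)
{
    bdrv_ref(bs);
    /* ... operation that may indirectly call bdrv_unref(bs) ... */
    bdrv_unref(bs); /* deletes bs here if this was the last reference */
}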
5455 
5456 struct BdrvOpBlocker {
5457     Error *reason;
5458     QLIST_ENTRY(BdrvOpBlocker) list;
5459 };
5460 
5461 bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5462 {
5463     BdrvOpBlocker *blocker;
5464     assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5465     if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5466         blocker = QLIST_FIRST(&bs->op_blockers[op]);
5467         if (errp) {
5468             error_setg(errp, "Device '%s' is busy: %s",
5469                        bdrv_get_device_name(bs),
5470                        error_get_pretty(blocker->reason));
5471         }
5472         return true;
5473     }
5474     return false;
5475 }
5476 
5477 void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5478 {
5479     BdrvOpBlocker *blocker;
5480     assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5481 
5482     blocker = g_new0(BdrvOpBlocker, 1);
5483     blocker->reason = reason;
5484     QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5485 }
5486 
5487 void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5488 {
5489     BdrvOpBlocker *blocker, *next;
5490     assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5491     QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5492         if (blocker->reason == reason) {
5493             QLIST_REMOVE(blocker, list);
5494             g_free(blocker);
5495         }
5496     }
5497 }
5498 
5499 void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5500 {
5501     int i;
5502     for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5503         bdrv_op_block(bs, i, reason);
5504     }
5505 }
5506 
5507 void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5508 {
5509     int i;
5510     for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5511         bdrv_op_unblock(bs, i, reason);
5512     }
5513 }
5514 
5515 bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5516 {
5517     int i;
5518 
5519     for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5520         if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5521             return false;
5522         }
5523     }
5524     return true;
5525 }
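
/* Illustrative sketch, not part of the original source: a long-running user
 * such as a block job typically blocks all operation categories with a single
 * Error object and later unblocks with the same object, since the reason
 * pointer itself is the key. The helper name is hypothetical. */
static G_GNUC_UNUSED void example_block_while_busy(BlockDriverState *bs)
{
    Error *blocker = NULL;

    error_setg(&blocker, "Device is in use by an example operation");
    bdrv_op_block_all(bs, blocker);

    /* ... perform the long-running operation ... */

    bdrv_op_unblock_all(bs, blocker);
    error_free(blocker);
}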
5526 
5527 void bdrv_iostatus_enable(BlockDriverState *bs)
5528 {
5529     bs->iostatus_enabled = true;
5530     bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5531 }
5532 
5533 /* The I/O status is only enabled if the drive explicitly
5534  * enables it _and_ the VM is configured to stop on errors */
5535 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5536 {
5537     return (bs->iostatus_enabled &&
5538            (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5539             bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
5540             bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5541 }
5542 
5543 void bdrv_iostatus_disable(BlockDriverState *bs)
5544 {
5545     bs->iostatus_enabled = false;
5546 }
5547 
5548 void bdrv_iostatus_reset(BlockDriverState *bs)
5549 {
5550     if (bdrv_iostatus_is_enabled(bs)) {
5551         bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5552         if (bs->job) {
5553             block_job_iostatus_reset(bs->job);
5554         }
5555     }
5556 }
5557 
5558 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5559 {
5560     assert(bdrv_iostatus_is_enabled(bs));
5561     if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5562         bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5563                                          BLOCK_DEVICE_IO_STATUS_FAILED;
5564     }
5565 }
5566 
5567 void bdrv_img_create(const char *filename, const char *fmt,
5568                      const char *base_filename, const char *base_fmt,
5569                      char *options, uint64_t img_size, int flags,
5570                      Error **errp, bool quiet)
5571 {
5572     QemuOptsList *create_opts = NULL;
5573     QemuOpts *opts = NULL;
5574     const char *backing_fmt, *backing_file;
5575     int64_t size;
5576     BlockDriver *drv, *proto_drv;
5577     BlockDriver *backing_drv = NULL;
5578     Error *local_err = NULL;
5579     int ret = 0;
5580 
5581     /* Find driver and parse its options */
5582     drv = bdrv_find_format(fmt);
5583     if (!drv) {
5584         error_setg(errp, "Unknown file format '%s'", fmt);
5585         return;
5586     }
5587 
5588     proto_drv = bdrv_find_protocol(filename, true);
5589     if (!proto_drv) {
5590         error_setg(errp, "Unknown protocol '%s'", filename);
5591         return;
5592     }
5593 
5594     if (!drv->create_opts) {
5595         error_setg(errp, "Format driver '%s' does not support image creation",
5596                    drv->format_name);
5597         return;
5598     }
5599 
5600     if (!proto_drv->create_opts) {
5601         error_setg(errp, "Protocol driver '%s' does not support image creation",
5602                    proto_drv->format_name);
5603         return;
5604     }
5605 
5606     create_opts = qemu_opts_append(create_opts, drv->create_opts);
5607     create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
5608 
5609     /* Create parameter list with default values */
5610     opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
5611     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);
5612 
5613     /* Parse -o options */
5614     if (options) {
5615         if (qemu_opts_do_parse(opts, options, NULL) != 0) {
5616             error_setg(errp, "Invalid options for file format '%s'", fmt);
5617             goto out;
5618         }
5619     }
5620 
5621     if (base_filename) {
5622         if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
5623             error_setg(errp, "Backing file not supported for file format '%s'",
5624                        fmt);
5625             goto out;
5626         }
5627     }
5628 
5629     if (base_fmt) {
5630         if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
5631             error_setg(errp, "Backing file format not supported for file "
5632                              "format '%s'", fmt);
5633             goto out;
5634         }
5635     }
5636 
5637     backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5638     if (backing_file) {
5639         if (!strcmp(filename, backing_file)) {
5640             error_setg(errp, "Trying to create an image with the same "
5641                              "filename as the backing file");
5642             goto out;
5643         }
5644     }
5645 
5646     backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5647     if (backing_fmt) {
5648         backing_drv = bdrv_find_format(backing_fmt);
5649         if (!backing_drv) {
5650             error_setg(errp, "Unknown backing file format '%s'",
5651                        backing_fmt);
5652             goto out;
5653         }
5654     }
5655 
5656     /* The size for the image must always be specified, with one exception:
5657      * if we are using a backing file, we can obtain the size from there. */
5658     size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5659     if (size == -1) {
5660         if (backing_file) {
5661             BlockDriverState *bs;
5662             char *full_backing = g_new0(char, PATH_MAX);
5664             int back_flags;
5665 
5666             bdrv_get_full_backing_filename_from_filename(filename, backing_file,
5667                                                          full_backing, PATH_MAX,
5668                                                          &local_err);
5669             if (local_err) {
5670                 g_free(full_backing);
5671                 goto out;
5672             }
5673 
5674             /* backing files always opened read-only */
5675             back_flags =
5676                 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5677 
5678             bs = NULL;
5679             ret = bdrv_open(&bs, full_backing, NULL, NULL, back_flags,
5680                             backing_drv, &local_err);
5681             g_free(full_backing);
5682             if (ret < 0) {
5683                 goto out;
5684             }
5685             size = bdrv_getlength(bs);
5686             if (size < 0) {
5687                 error_setg_errno(errp, -size, "Could not get size of '%s'",
5688                                  backing_file);
5689                 bdrv_unref(bs);
5690                 goto out;
5691             }
5692 
5693             qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
5694 
5695             bdrv_unref(bs);
5696         } else {
5697             error_setg(errp, "Image creation needs a size parameter");
5698             goto out;
5699         }
5700     }
5701 
5702     if (!quiet) {
5703         printf("Formatting '%s', fmt=%s", filename, fmt);
5704         qemu_opts_print(opts, " ");
5705         puts("");
5706     }
5707 
5708     ret = bdrv_create(drv, filename, opts, &local_err);
5709 
5710     if (ret == -EFBIG) {
5711         /* This is generally a better message than whatever the driver would
5712          * deliver (especially because of the cluster_size_hint), since that
5713          * is most probably not much different from "image too large". */
5714         const char *cluster_size_hint = "";
5715         if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
5716             cluster_size_hint = " (try using a larger cluster size)";
5717         }
5718         error_setg(errp, "The image size is too large for file format '%s'"
5719                    "%s", fmt, cluster_size_hint);
5720         error_free(local_err);
5721         local_err = NULL;
5722     }
5723 
5724 out:
5725     qemu_opts_del(opts);
5726     qemu_opts_free(create_opts);
5727     if (local_err) {
5728         error_propagate(errp, local_err);
5729     }
5730 }
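
/* Illustrative sketch, not part of the original source: programmatically
 * creating a 1 GiB qcow2 image, roughly what qemu-img create does after
 * option parsing. The file name is a placeholder and the helper name is
 * hypothetical. */
static G_GNUC_UNUSED void example_create_image(Error **errp)
{
    bdrv_img_create("/tmp/example.qcow2", "qcow2",
                    NULL, NULL,          /* no backing file, no backing format */
                    NULL,                /* no extra -o options */
                    (uint64_t)1 << 30,   /* 1 GiB */
                    0,                   /* flags */
                    errp, true);         /* quiet */
}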
5731 
5732 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5733 {
5734     return bs->aio_context;
5735 }
5736 
5737 void bdrv_detach_aio_context(BlockDriverState *bs)
5738 {
5739     BdrvAioNotifier *baf;
5740 
5741     if (!bs->drv) {
5742         return;
5743     }
5744 
5745     QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
5746         baf->detach_aio_context(baf->opaque);
5747     }
5748 
5749     if (bs->io_limits_enabled) {
5750         throttle_detach_aio_context(&bs->throttle_state);
5751     }
5752     if (bs->drv->bdrv_detach_aio_context) {
5753         bs->drv->bdrv_detach_aio_context(bs);
5754     }
5755     if (bs->file) {
5756         bdrv_detach_aio_context(bs->file);
5757     }
5758     if (bs->backing_hd) {
5759         bdrv_detach_aio_context(bs->backing_hd);
5760     }
5761 
5762     bs->aio_context = NULL;
5763 }
5764 
5765 void bdrv_attach_aio_context(BlockDriverState *bs,
5766                              AioContext *new_context)
5767 {
5768     BdrvAioNotifier *ban;
5769 
5770     if (!bs->drv) {
5771         return;
5772     }
5773 
5774     bs->aio_context = new_context;
5775 
5776     if (bs->backing_hd) {
5777         bdrv_attach_aio_context(bs->backing_hd, new_context);
5778     }
5779     if (bs->file) {
5780         bdrv_attach_aio_context(bs->file, new_context);
5781     }
5782     if (bs->drv->bdrv_attach_aio_context) {
5783         bs->drv->bdrv_attach_aio_context(bs, new_context);
5784     }
5785     if (bs->io_limits_enabled) {
5786         throttle_attach_aio_context(&bs->throttle_state, new_context);
5787     }
5788 
5789     QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
5790         ban->attached_aio_context(new_context, ban->opaque);
5791     }
5792 }
5793 
5794 void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
5795 {
5796     bdrv_drain_all(); /* ensure there are no in-flight requests */
5797 
5798     bdrv_detach_aio_context(bs);
5799 
5800     /* This function executes in the old AioContext so acquire the new one in
5801      * case it runs in a different thread.
5802      */
5803     aio_context_acquire(new_context);
5804     bdrv_attach_aio_context(bs, new_context);
5805     aio_context_release(new_context);
5806 }
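
/* Example (illustrative, not from the original source): a caller in the main
 * loop can hand a drive over to an IOThread with
 *
 *     bdrv_set_aio_context(bs, iothread_get_aio_context(iothread));
 *
 * after which all callbacks for this BDS run in that thread's AioContext. */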
5807 
5808 void bdrv_add_aio_context_notifier(BlockDriverState *bs,
5809         void (*attached_aio_context)(AioContext *new_context, void *opaque),
5810         void (*detach_aio_context)(void *opaque), void *opaque)
5811 {
5812     BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1);
5813     *ban = (BdrvAioNotifier){
5814         .attached_aio_context = attached_aio_context,
5815         .detach_aio_context   = detach_aio_context,
5816         .opaque               = opaque
5817     };
5818 
5819     QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list);
5820 }
5821 
5822 void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
5823                                       void (*attached_aio_context)(AioContext *,
5824                                                                    void *),
5825                                       void (*detach_aio_context)(void *),
5826                                       void *opaque)
5827 {
5828     BdrvAioNotifier *ban, *ban_next;
5829 
5830     QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
5831         if (ban->attached_aio_context == attached_aio_context &&
5832             ban->detach_aio_context   == detach_aio_context   &&
5833             ban->opaque               == opaque)
5834         {
5835             QLIST_REMOVE(ban, list);
5836             g_free(ban);
5837 
5838             return;
5839         }
5840     }
5841 
5842     abort();
5843 }
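
/* Illustrative sketch, not part of the original source: a device model keeping
 * per-AioContext state registers a matched pair of callbacks; the same three
 * pointers must be passed again on removal, which aborts above if no matching
 * notifier is found. All names are hypothetical. */
static void example_attached(AioContext *new_context, void *opaque)
{
    /* recreate timers and bottom halves in new_context */
}

static void example_detach(void *opaque)
{
    /* tear down resources tied to the old AioContext */
}

static G_GNUC_UNUSED void example_track_context(BlockDriverState *bs, void *dev)
{
    bdrv_add_aio_context_notifier(bs, example_attached, example_detach, dev);
    /* ... and when the device is unrealized ... */
    bdrv_remove_aio_context_notifier(bs, example_attached, example_detach, dev);
}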
5844 
5845 void bdrv_add_before_write_notifier(BlockDriverState *bs,
5846                                     NotifierWithReturn *notifier)
5847 {
5848     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5849 }
5850 
5851 int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
5852                        BlockDriverAmendStatusCB *status_cb)
5853 {
5854     if (!bs->drv->bdrv_amend_options) {
5855         return -ENOTSUP;
5856     }
5857     return bs->drv->bdrv_amend_options(bs, opts, status_cb);
5858 }
5859 
5860 /* This function will be called by the bdrv_recurse_is_first_non_filter method
5861  * of block filters and by bdrv_is_first_non_filter.
5862  * It is used to test whether the given bs is the candidate or to recurse
5863  * further down the node graph.
5864  */
5865 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5866                                       BlockDriverState *candidate)
5867 {
5868     /* return false if basic checks fail */
5869     if (!bs || !bs->drv) {
5870         return false;
5871     }
5872 
5873     /* the code reached a non-filter block driver -> check if the bs is
5874      * the same as the candidate. This is the recursion termination condition.
5875      */
5876     if (!bs->drv->is_filter) {
5877         return bs == candidate;
5878     }
5879     /* Down this path the driver is a block filter driver */
5880 
5881     /* If the block filter recursion method is defined use it to recurse down
5882      * the node graph.
5883      */
5884     if (bs->drv->bdrv_recurse_is_first_non_filter) {
5885         return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5886     }
5887 
5888     /* the driver is a block filter but does not allow recursion ->
5889      * return false */
5890     return false;
5891 }
5892 
5893 /* This function checks whether the candidate is the first non-filter bs down
5894  * its bs chain. Since we don't have pointers to parents, it explores all bs
5895  * chains from the top. Some filters can choose not to pass down the recursion.
5896  */
5897 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5898 {
5899     BlockDriverState *bs;
5900 
5901     /* walk down the bs forest recursively */
5902     QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5903         bool perm;
5904 
5905         /* try to recurse in this top level bs */
5906         perm = bdrv_recurse_is_first_non_filter(bs, candidate);
5907 
5908         /* candidate is the first non filter */
5909         if (perm) {
5910             return true;
5911         }
5912     }
5913 
5914     return false;
5915 }
5916 
5917 BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
5918 {
5919     BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
5920     AioContext *aio_context;
5921 
5922     if (!to_replace_bs) {
5923         error_setg(errp, "Node name '%s' not found", node_name);
5924         return NULL;
5925     }
5926 
5927     aio_context = bdrv_get_aio_context(to_replace_bs);
5928     aio_context_acquire(aio_context);
5929 
5930     if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
5931         to_replace_bs = NULL;
5932         goto out;
5933     }
5934 
5935     /* We don't want an arbitrary node of the BDS chain to be replaced, only
5936      * the topmost non-filter, in order to prevent data corruption.
5937      * Another benefit is that this test excludes backing files, which are
5938      * blocked by the backing blockers.
5939      */
5940     if (!bdrv_is_first_non_filter(to_replace_bs)) {
5941         error_setg(errp, "Only the topmost non-filter node can be replaced");
5942         to_replace_bs = NULL;
5943         goto out;
5944     }
5945 
5946 out:
5947     aio_context_release(aio_context);
5948     return to_replace_bs;
5949 }
5950 
5951 void bdrv_io_plug(BlockDriverState *bs)
5952 {
5953     BlockDriver *drv = bs->drv;
5954     if (drv && drv->bdrv_io_plug) {
5955         drv->bdrv_io_plug(bs);
5956     } else if (bs->file) {
5957         bdrv_io_plug(bs->file);
5958     }
5959 }
5960 
5961 void bdrv_io_unplug(BlockDriverState *bs)
5962 {
5963     BlockDriver *drv = bs->drv;
5964     if (drv && drv->bdrv_io_unplug) {
5965         drv->bdrv_io_unplug(bs);
5966     } else if (bs->file) {
5967         bdrv_io_unplug(bs->file);
5968     }
5969 }
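
/* Illustrative sketch, not part of the original source: the batching pattern
 * used by callers that submit many requests at once, allowing drivers such as
 * linux-aio to coalesce the queued requests into fewer syscalls. The helper
 * name is hypothetical. */
static G_GNUC_UNUSED void example_submit_batch(BlockDriverState *bs)
{
    bdrv_io_plug(bs);
    /* ... queue several bdrv_aio_readv()/bdrv_aio_writev() requests ... */
    bdrv_io_unplug(bs); /* submits everything queued while plugged */
}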
5970 
5971 void bdrv_flush_io_queue(BlockDriverState *bs)
5972 {
5973     BlockDriver *drv = bs->drv;
5974     if (drv && drv->bdrv_flush_io_queue) {
5975         drv->bdrv_flush_io_queue(bs);
5976     } else if (bs->file) {
5977         bdrv_flush_io_queue(bs->file);
5978     }
5979 }
5980 
5981 static bool append_open_options(QDict *d, BlockDriverState *bs)
5982 {
5983     const QDictEntry *entry;
5984     bool found_any = false;
5985 
5986     for (entry = qdict_first(bs->options); entry;
5987          entry = qdict_next(bs->options, entry))
5988     {
5989         /* Only take options for this level and exclude all non-driver-specific
5990          * options */
5991         if (!strchr(qdict_entry_key(entry), '.') &&
5992             strcmp(qdict_entry_key(entry), "node-name"))
5993         {
5994             qobject_incref(qdict_entry_value(entry));
5995             qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry));
5996             found_any = true;
5997         }
5998     }
5999 
6000     return found_any;
6001 }
6002 
6003 /* Updates the following BDS fields:
6004  *  - exact_filename: A filename which may be used for opening a block device
6005  *                    which (mostly) equals the given BDS (even without any
6006  *                    other options; so reading and writing must return the same
6007  *                    results, but caching etc. may be different)
6008  *  - full_open_options: Options which, when given when opening a block device
6009  *                       (without a filename), result in a BDS (mostly)
6010  *                       equalling the given one
6011  *  - filename: If exact_filename is set, it is copied here. Otherwise,
6012  *              full_open_options is converted to a JSON object, prefixed with
6013  *              "json:" (for use through the JSON pseudo protocol) and put here.
6014  */
6015 void bdrv_refresh_filename(BlockDriverState *bs)
6016 {
6017     BlockDriver *drv = bs->drv;
6018     QDict *opts;
6019 
6020     if (!drv) {
6021         return;
6022     }
6023 
6024     /* This BDS's file name will most probably depend on its file's name, so
6025      * refresh that first */
6026     if (bs->file) {
6027         bdrv_refresh_filename(bs->file);
6028     }
6029 
6030     if (drv->bdrv_refresh_filename) {
6031         /* Obsolete information is of no use here, so drop the old file name
6032          * information before refreshing it */
6033         bs->exact_filename[0] = '\0';
6034         if (bs->full_open_options) {
6035             QDECREF(bs->full_open_options);
6036             bs->full_open_options = NULL;
6037         }
6038 
6039         drv->bdrv_refresh_filename(bs);
6040     } else if (bs->file) {
6041         /* Try to reconstruct valid information from the underlying file */
6042         bool has_open_options;
6043 
6044         bs->exact_filename[0] = '\0';
6045         if (bs->full_open_options) {
6046             QDECREF(bs->full_open_options);
6047             bs->full_open_options = NULL;
6048         }
6049 
6050         opts = qdict_new();
6051         has_open_options = append_open_options(opts, bs);
6052 
6053         /* If no specific options have been given for this BDS, the filename of
6054          * the underlying file should suffice for this one as well */
6055         if (bs->file->exact_filename[0] && !has_open_options) {
6056             strcpy(bs->exact_filename, bs->file->exact_filename);
6057         }
6058         /* Reconstructing the full options QDict is simple for most format block
6059          * drivers, as long as the full options are known for the underlying
6060          * file BDS. The full options QDict of that file BDS should somehow
6061          * contain a representation of the filename, therefore the following
6062          * suffices without querying the (exact_)filename of this BDS. */
6063         if (bs->file->full_open_options) {
6064             qdict_put_obj(opts, "driver",
6065                           QOBJECT(qstring_from_str(drv->format_name)));
6066             QINCREF(bs->file->full_open_options);
6067             qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options));
6068 
6069             bs->full_open_options = opts;
6070         } else {
6071             QDECREF(opts);
6072         }
6073     } else if (!bs->full_open_options && qdict_size(bs->options)) {
6074         /* There is no underlying file BDS (at least referenced by BDS.file),
6075          * so the full options QDict should be equal to the options given
6076          * specifically for this block device when it was opened (plus the
6077          * driver specification).
6078          * Because those options don't change, there is no need to update
6079          * full_open_options when it's already set. */
6080 
6081         opts = qdict_new();
6082         append_open_options(opts, bs);
6083         qdict_put_obj(opts, "driver",
6084                       QOBJECT(qstring_from_str(drv->format_name)));
6085 
6086         if (bs->exact_filename[0]) {
6087             /* This may not work for all block protocol drivers (some may
6088              * require this filename to be parsed), but we have to find some
6089              * default solution here, so just include it. If some block driver
6090              * does not support pure options without any filename at all or
6091              * needs some special format of the options QDict, it needs to
6092              * implement the driver-specific bdrv_refresh_filename() function.
6093              */
6094             qdict_put_obj(opts, "filename",
6095                           QOBJECT(qstring_from_str(bs->exact_filename)));
6096         }
6097 
6098         bs->full_open_options = opts;
6099     }
6100 
6101     if (bs->exact_filename[0]) {
6102         pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename);
6103     } else if (bs->full_open_options) {
6104         QString *json = qobject_to_json(QOBJECT(bs->full_open_options));
6105         snprintf(bs->filename, sizeof(bs->filename), "json:%s",
6106                  qstring_get_str(json));
6107         QDECREF(json);
6108     }
6109 }
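
/* Example (illustrative, not from the original source): for a qcow2 image on a
 * plain file that was opened with driver-specific options, the fallback path
 * above can produce a filename such as
 *
 *     json:{"driver": "qcow2", "file": {"driver": "file",
 *                                       "filename": "/tmp/test.qcow2"}}
 *
 * which can later be passed back to bdrv_open() via the JSON pseudo
 * protocol. */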
6110 
6111 /* The purpose of this accessor function is to allow the device models to
6112  * access the BlockAcctStats structure embedded inside a BlockDriverState
6113  * without being aware of the BlockDriverState structure layout.
6114  * It will go away when the BlockAcctStats structure is moved inside
6115  * the device models.
6116  */
6117 BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
6118 {
6119     return &bs->stats;
6120 }
6121