1 /*
2  * QEMU host block devices
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or
7  * later.  See the COPYING file in the top-level directory.
8  *
9  * This file incorporates work covered by the following copyright and
10  * permission notice:
11  *
12  * Copyright (c) 2003-2008 Fabrice Bellard
13  *
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this software and associated documentation files (the "Software"), to deal
16  * in the Software without restriction, including without limitation the rights
17  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18  * copies of the Software, and to permit persons to whom the Software is
19  * furnished to do so, subject to the following conditions:
20  *
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
30  * THE SOFTWARE.
31  */
32 
33 #include "sysemu/block-backend.h"
34 #include "sysemu/blockdev.h"
35 #include "hw/block/block.h"
36 #include "block/blockjob.h"
37 #include "block/throttle-groups.h"
38 #include "monitor/monitor.h"
39 #include "qemu/error-report.h"
40 #include "qemu/option.h"
41 #include "qemu/config-file.h"
42 #include "qapi/qmp/types.h"
43 #include "qapi-visit.h"
44 #include "qapi/qmp/qerror.h"
45 #include "qapi/qmp-output-visitor.h"
46 #include "qapi/util.h"
47 #include "sysemu/sysemu.h"
48 #include "block/block_int.h"
49 #include "qmp-commands.h"
50 #include "trace.h"
51 #include "sysemu/arch_init.h"
52 
53 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
54     QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
55 
56 static const char *const if_name[IF_COUNT] = {
57     [IF_NONE] = "none",
58     [IF_IDE] = "ide",
59     [IF_SCSI] = "scsi",
60     [IF_FLOPPY] = "floppy",
61     [IF_PFLASH] = "pflash",
62     [IF_MTD] = "mtd",
63     [IF_SD] = "sd",
64     [IF_VIRTIO] = "virtio",
65     [IF_XEN] = "xen",
66 };
67 
68 static int if_max_devs[IF_COUNT] = {
69     /*
70      * Do not change these numbers!  They govern how drive option
71      * index maps to unit and bus.  That mapping is ABI.
72      *
73  * All controllers used to implement if=T drives need to support
74      * if_max_devs[T] units, for any T with if_max_devs[T] != 0.
75      * Otherwise, some index values map to "impossible" bus, unit
76      * values.
77      *
78      * For instance, if you change [IF_SCSI] to 255, -drive
79      * if=scsi,index=12 no longer means bus=1,unit=5, but
80      * bus=0,unit=12.  With an lsi53c895a controller (7 units max),
81      * the drive can't be set up.  Regression.
82      */
83     [IF_IDE] = 2,
84     [IF_SCSI] = 7,
85 };
86 
87 /**
88  * Boards may call this to offer board-by-board overrides
89  * of the default, global values.
90  */
91 void override_max_devs(BlockInterfaceType type, int max_devs)
92 {
93     BlockBackend *blk;
94     DriveInfo *dinfo;
95 
96     if (max_devs <= 0) {
97         return;
98     }
99 
100     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
101         dinfo = blk_legacy_dinfo(blk);
102         if (dinfo->type == type) {
103             fprintf(stderr, "Cannot override units-per-bus property of"
104                     " the %s interface, because a drive of that type has"
105                     " already been added.\n", if_name[type]);
106             g_assert_not_reached();
107         }
108     }
109 
110     if_max_devs[type] = max_devs;
111 }
112 
113 /*
114  * We automatically delete the drive when a device using it gets
115  * unplugged.  Questionable feature, but we can't just drop it.
116  * Device models call blockdev_mark_auto_del() to schedule the
117  * automatic deletion, and generic qdev code calls blockdev_auto_del()
118  * when deletion is actually safe.
119  */
120 void blockdev_mark_auto_del(BlockBackend *blk)
121 {
122     DriveInfo *dinfo = blk_legacy_dinfo(blk);
123     BlockDriverState *bs = blk_bs(blk);
124     AioContext *aio_context;
125 
126     if (!dinfo) {
127         return;
128     }
129 
130     if (bs) {
131         aio_context = bdrv_get_aio_context(bs);
132         aio_context_acquire(aio_context);
133 
134         if (bs->job) {
135             block_job_cancel(bs->job);
136         }
137 
138         aio_context_release(aio_context);
139     }
140 
141     dinfo->auto_del = 1;
142 }
143 
144 void blockdev_auto_del(BlockBackend *blk)
145 {
146     DriveInfo *dinfo = blk_legacy_dinfo(blk);
147 
148     if (dinfo && dinfo->auto_del) {
149         blk_unref(blk);
150     }
151 }
152 
153 /**
154  * Returns how many units per bus the given interface type can
155  * currently support.
156  *
157  *  A positive integer indicates n units per bus.
158  *  0 implies the mapping has not been established.
159  * -1 indicates an invalid BlockInterfaceType was given.
160  */
161 int drive_get_max_devs(BlockInterfaceType type)
162 {
163     if (type >= IF_IDE && type < IF_COUNT) {
164         return if_max_devs[type];
165     }
166 
167     return -1;
168 }
169 
170 static int drive_index_to_bus_id(BlockInterfaceType type, int index)
171 {
172     int max_devs = if_max_devs[type];
173     return max_devs ? index / max_devs : 0;
174 }
175 
176 static int drive_index_to_unit_id(BlockInterfaceType type, int index)
177 {
178     int max_devs = if_max_devs[type];
179     return max_devs ? index % max_devs : index;
180 }
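
/*
 * Worked example of the mapping above (values taken from the if_max_devs
 * comment): with if_max_devs[IF_SCSI] == 7, index 12 yields
 * bus = 12 / 7 = 1 and unit = 12 % 7 = 5.  For interfaces with
 * if_max_devs[T] == 0 (e.g. if=virtio), every index maps to bus 0 and
 * unit == index.
 */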
181 
182 QemuOpts *drive_def(const char *optstr)
183 {
184     return qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
185 }
186 
187 QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
188                     const char *optstr)
189 {
190     QemuOpts *opts;
191 
192     opts = drive_def(optstr);
193     if (!opts) {
194         return NULL;
195     }
196     if (type != IF_DEFAULT) {
197         qemu_opt_set(opts, "if", if_name[type], &error_abort);
198     }
199     if (index >= 0) {
200         qemu_opt_set_number(opts, "index", index, &error_abort);
201     }
202     if (file)
203         qemu_opt_set(opts, "file", file, &error_abort);
204     return opts;
205 }
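
/*
 * Illustration (hypothetical values): a board calling
 *     drive_add(IF_PFLASH, 0, "flash.img", "format=raw")
 * builds the same QemuOpts as the command-line option
 *     -drive if=pflash,index=0,file=flash.img,format=raw
 */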
206 
207 DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
208 {
209     BlockBackend *blk;
210     DriveInfo *dinfo;
211 
212     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
213         dinfo = blk_legacy_dinfo(blk);
214         if (dinfo && dinfo->type == type
215             && dinfo->bus == bus && dinfo->unit == unit) {
216             return dinfo;
217         }
218     }
219 
220     return NULL;
221 }
222 
223 bool drive_check_orphaned(void)
224 {
225     BlockBackend *blk;
226     DriveInfo *dinfo;
227     bool rs = false;
228 
229     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
230         dinfo = blk_legacy_dinfo(blk);
231         /* If blk_get_attached_dev() returns NULL, no device is attached. */
232         /* Unless this is a default drive, this may be an oversight. */
233         if (!blk_get_attached_dev(blk) && !dinfo->is_default &&
234             dinfo->type != IF_NONE) {
235             fprintf(stderr, "Warning: Orphaned drive without device: "
236                     "id=%s,file=%s,if=%s,bus=%d,unit=%d\n",
237                     blk_name(blk), blk_bs(blk) ? blk_bs(blk)->filename : "",
238                     if_name[dinfo->type], dinfo->bus, dinfo->unit);
239             rs = true;
240         }
241     }
242 
243     return rs;
244 }
245 
246 DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
247 {
248     return drive_get(type,
249                      drive_index_to_bus_id(type, index),
250                      drive_index_to_unit_id(type, index));
251 }
252 
253 int drive_get_max_bus(BlockInterfaceType type)
254 {
255     int max_bus;
256     BlockBackend *blk;
257     DriveInfo *dinfo;
258 
259     max_bus = -1;
260     for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
261         dinfo = blk_legacy_dinfo(blk);
262         if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
263             max_bus = dinfo->bus;
264         }
265     }
266     return max_bus;
267 }
268 
269 /* Get a block device.  This should only be used for single-drive devices
270    (e.g. SD/Floppy/MTD).  Multi-disk devices (scsi/ide) should use the
271    appropriate bus.  */
272 DriveInfo *drive_get_next(BlockInterfaceType type)
273 {
274     static int next_block_unit[IF_COUNT];
275 
276     return drive_get(type, 0, next_block_unit[type]++);
277 }
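
/*
 * Illustration: successive drive_get_next(IF_SD) calls return the drives at
 * bus 0, unit 0, then unit 1, and so on, or NULL once no drive exists at the
 * next unit.
 */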
278 
279 static void bdrv_format_print(void *opaque, const char *name)
280 {
281     error_printf(" %s", name);
282 }
283 
284 typedef struct {
285     QEMUBH *bh;
286     BlockDriverState *bs;
287 } BDRVPutRefBH;
288 
289 static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
290 {
291     if (!strcmp(buf, "ignore")) {
292         return BLOCKDEV_ON_ERROR_IGNORE;
293     } else if (!is_read && !strcmp(buf, "enospc")) {
294         return BLOCKDEV_ON_ERROR_ENOSPC;
295     } else if (!strcmp(buf, "stop")) {
296         return BLOCKDEV_ON_ERROR_STOP;
297     } else if (!strcmp(buf, "report")) {
298         return BLOCKDEV_ON_ERROR_REPORT;
299     } else {
300         error_setg(errp, "'%s' invalid %s error action",
301                    buf, is_read ? "read" : "write");
302         return -1;
303     }
304 }
305 
306 static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
307                                   Error **errp)
308 {
309     const QListEntry *entry;
310     for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
311         switch (qobject_type(entry->value)) {
312 
313         case QTYPE_QSTRING: {
314             unsigned long long length;
315             const char *str = qstring_get_str(qobject_to_qstring(entry->value));
316             if (parse_uint_full(str, &length, 10) == 0 &&
317                 length > 0 && length <= UINT_MAX) {
318                 block_acct_add_interval(stats, (unsigned) length);
319             } else {
320                 error_setg(errp, "Invalid interval length: %s", str);
321                 return false;
322             }
323             break;
324         }
325 
326         case QTYPE_QINT: {
327             int64_t length = qint_get_int(qobject_to_qint(entry->value));
328             if (length > 0 && length <= UINT_MAX) {
329                 block_acct_add_interval(stats, (unsigned) length);
330             } else {
331                 error_setg(errp, "Invalid interval length: %" PRId64, length);
332                 return false;
333             }
334             break;
335         }
336 
337         default:
338             error_setg(errp, "The specification of stats-intervals is invalid");
339             return false;
340         }
341     }
342     return true;
343 }
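
/*
 * Illustration (hypothetical values): an intervals list built from
 * "-drive ...,stats-intervals.0=60,stats-intervals.1=3600" enables 60-second
 * and 3600-second accounting intervals.  Each entry must be an integer (or a
 * numeric string) in the range [1, UINT_MAX].
 */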
344 
345 static bool check_throttle_config(ThrottleConfig *cfg, Error **errp)
346 {
347     if (throttle_conflicting(cfg)) {
348         error_setg(errp, "bps/iops/max total values and read/write values"
349                          " cannot be used at the same time");
350         return false;
351     }
352 
353     if (!throttle_is_valid(cfg)) {
354         error_setg(errp, "bps/iops/max values must be within [0, %lld]",
355                    THROTTLE_VALUE_MAX);
356         return false;
357     }
358 
359     if (throttle_max_is_missing_limit(cfg)) {
360         error_setg(errp, "bps_max/iops_max require corresponding"
361                          " bps/iops values");
362         return false;
363     }
364 
365     return true;
366 }
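
/*
 * Example of a configuration rejected above (illustrative): specifying both
 * throttling.bps-total and throttling.bps-read trips throttle_conflicting(),
 * because total limits and read/write limits are mutually exclusive.
 */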
367 
368 typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;
369 
370 /* All parameters but @opts are optional and may be set to NULL. */
371 static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
372     const char **throttling_group, ThrottleConfig *throttle_cfg,
373     BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
374 {
375     const char *discard;
376     Error *local_error = NULL;
377     const char *aio;
378 
379     if (bdrv_flags) {
380         if (!qemu_opt_get_bool(opts, "read-only", false)) {
381             *bdrv_flags |= BDRV_O_RDWR;
382         }
383         if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
384             *bdrv_flags |= BDRV_O_COPY_ON_READ;
385         }
386 
387         if ((discard = qemu_opt_get(opts, "discard")) != NULL) {
388             if (bdrv_parse_discard_flags(discard, bdrv_flags) != 0) {
389                 error_setg(errp, "Invalid discard option");
390                 return;
391             }
392         }
393 
394         if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
395             if (!strcmp(aio, "native")) {
396                 *bdrv_flags |= BDRV_O_NATIVE_AIO;
397             } else if (!strcmp(aio, "threads")) {
398                 /* this is the default */
399             } else {
400                error_setg(errp, "invalid aio option");
401                return;
402             }
403         }
404     }
405 
406     /* disk I/O throttling */
407     if (throttling_group) {
408         *throttling_group = qemu_opt_get(opts, "throttling.group");
409     }
410 
411     if (throttle_cfg) {
412         memset(throttle_cfg, 0, sizeof(*throttle_cfg));
413         throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
414             qemu_opt_get_number(opts, "throttling.bps-total", 0);
415         throttle_cfg->buckets[THROTTLE_BPS_READ].avg  =
416             qemu_opt_get_number(opts, "throttling.bps-read", 0);
417         throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
418             qemu_opt_get_number(opts, "throttling.bps-write", 0);
419         throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
420             qemu_opt_get_number(opts, "throttling.iops-total", 0);
421         throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
422             qemu_opt_get_number(opts, "throttling.iops-read", 0);
423         throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
424             qemu_opt_get_number(opts, "throttling.iops-write", 0);
425 
426         throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
427             qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
428         throttle_cfg->buckets[THROTTLE_BPS_READ].max  =
429             qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
430         throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
431             qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
432         throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
433             qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
434         throttle_cfg->buckets[THROTTLE_OPS_READ].max =
435             qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
436         throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
437             qemu_opt_get_number(opts, "throttling.iops-write-max", 0);
438 
439         throttle_cfg->op_size =
440             qemu_opt_get_number(opts, "throttling.iops-size", 0);
441 
442         if (!check_throttle_config(throttle_cfg, errp)) {
443             return;
444         }
445     }
446 
447     if (detect_zeroes) {
448         *detect_zeroes =
449             qapi_enum_parse(BlockdevDetectZeroesOptions_lookup,
450                             qemu_opt_get(opts, "detect-zeroes"),
451                             BLOCKDEV_DETECT_ZEROES_OPTIONS__MAX,
452                             BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
453                             &local_error);
454         if (local_error) {
455             error_propagate(errp, local_error);
456             return;
457         }
458 
459         if (bdrv_flags &&
460             *detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
461             !(*bdrv_flags & BDRV_O_UNMAP))
462         {
463             error_setg(errp, "setting detect-zeroes to unmap is not allowed "
464                              "without setting discard operation to unmap");
465             return;
466         }
467     }
468 }
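
/*
 * Illustration (hypothetical values): for
 *     -drive ...,discard=unmap,aio=native,throttling.iops-total=1000,detect-zeroes=unmap
 * this helper sets BDRV_O_UNMAP and BDRV_O_NATIVE_AIO in *bdrv_flags, fills
 * the THROTTLE_OPS_TOTAL bucket of *throttle_cfg, and reports
 * detect-zeroes=unmap, which is only accepted because discard=unmap is also
 * given.
 */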
469 
470 /* Takes the ownership of bs_opts */
471 static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
472                                    Error **errp)
473 {
474     const char *buf;
475     int bdrv_flags = 0;
476     int on_read_error, on_write_error;
477     bool account_invalid, account_failed;
478     BlockBackend *blk;
479     BlockDriverState *bs;
480     ThrottleConfig cfg;
481     int snapshot = 0;
482     Error *error = NULL;
483     QemuOpts *opts;
484     QDict *interval_dict = NULL;
485     QList *interval_list = NULL;
486     const char *id;
487     BlockdevDetectZeroesOptions detect_zeroes =
488         BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
489     const char *throttling_group = NULL;
490 
491     /* Check common options by copying from bs_opts to opts; all other options
492      * stay in bs_opts for processing by bdrv_open(). */
493     id = qdict_get_try_str(bs_opts, "id");
494     opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error);
495     if (error) {
496         error_propagate(errp, error);
497         goto err_no_opts;
498     }
499 
500     qemu_opts_absorb_qdict(opts, bs_opts, &error);
501     if (error) {
502         error_propagate(errp, error);
503         goto early_err;
504     }
505 
506     if (id) {
507         qdict_del(bs_opts, "id");
508     }
509 
510     /* extract parameters */
511     snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
512 
513     account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true);
514     account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true);
515 
516     qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
517     qdict_array_split(interval_dict, &interval_list);
518 
519     if (qdict_size(interval_dict) != 0) {
520         error_setg(errp, "Invalid option stats-intervals.%s",
521                    qdict_first(interval_dict)->key);
522         goto early_err;
523     }
524 
525     extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
526                                     &detect_zeroes, &error);
527     if (error) {
528         error_propagate(errp, error);
529         goto early_err;
530     }
531 
532     if ((buf = qemu_opt_get(opts, "format")) != NULL) {
533         if (is_help_option(buf)) {
534             error_printf("Supported formats:");
535             bdrv_iterate_format(bdrv_format_print, NULL);
536             error_printf("\n");
537             goto early_err;
538         }
539 
540         if (qdict_haskey(bs_opts, "driver")) {
541             error_setg(errp, "Cannot specify both 'driver' and 'format'");
542             goto early_err;
543         }
544         qdict_put(bs_opts, "driver", qstring_from_str(buf));
545     }
546 
547     on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
548     if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
549         on_write_error = parse_block_error_action(buf, 0, &error);
550         if (error) {
551             error_propagate(errp, error);
552             goto early_err;
553         }
554     }
555 
556     on_read_error = BLOCKDEV_ON_ERROR_REPORT;
557     if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
558         on_read_error = parse_block_error_action(buf, 1, &error);
559         if (error) {
560             error_propagate(errp, error);
561             goto early_err;
562         }
563     }
564 
565     if (snapshot) {
566         bdrv_flags |= BDRV_O_SNAPSHOT;
567     }
568 
569     /* init */
570     if ((!file || !*file) && !qdict_size(bs_opts)) {
571         BlockBackendRootState *blk_rs;
572 
573         blk = blk_new(qemu_opts_id(opts), errp);
574         if (!blk) {
575             goto early_err;
576         }
577 
578         blk_rs = blk_get_root_state(blk);
579         blk_rs->open_flags    = bdrv_flags;
580         blk_rs->read_only     = !(bdrv_flags & BDRV_O_RDWR);
581         blk_rs->detect_zeroes = detect_zeroes;
582 
583         if (throttle_enabled(&cfg)) {
584             if (!throttling_group) {
585                 throttling_group = blk_name(blk);
586             }
587             blk_rs->throttle_group = g_strdup(throttling_group);
588             blk_rs->throttle_state = throttle_group_incref(throttling_group);
589             blk_rs->throttle_state->cfg = cfg;
590         }
591 
592         QDECREF(bs_opts);
593     } else {
594         if (file && !*file) {
595             file = NULL;
596         }
597 
598         /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
599          * with other callers) rather than what we want as the real defaults.
600          * Apply the defaults here instead. */
601         qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_WB, "on");
602         qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
603         qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
604 
605         if (snapshot) {
606             /* always use cache=unsafe with snapshot */
607             qdict_put(bs_opts, BDRV_OPT_CACHE_WB, qstring_from_str("on"));
608             qdict_put(bs_opts, BDRV_OPT_CACHE_DIRECT, qstring_from_str("off"));
609             qdict_put(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, qstring_from_str("on"));
610         }
611 
612         blk = blk_new_open(qemu_opts_id(opts), file, NULL, bs_opts, bdrv_flags,
613                            errp);
614         if (!blk) {
615             goto err_no_bs_opts;
616         }
617         bs = blk_bs(blk);
618 
619         bs->detect_zeroes = detect_zeroes;
620 
621         /* disk I/O throttling */
622         if (throttle_enabled(&cfg)) {
623             if (!throttling_group) {
624                 throttling_group = blk_name(blk);
625             }
626             bdrv_io_limits_enable(bs, throttling_group);
627             bdrv_set_io_limits(bs, &cfg);
628         }
629 
630         if (bdrv_key_required(bs)) {
631             autostart = 0;
632         }
633 
634         block_acct_init(blk_get_stats(blk), account_invalid, account_failed);
635 
636         if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
637             blk_unref(blk);
638             blk = NULL;
639             goto err_no_bs_opts;
640         }
641     }
642 
643     blk_set_on_error(blk, on_read_error, on_write_error);
644 
645 err_no_bs_opts:
646     qemu_opts_del(opts);
647     QDECREF(interval_dict);
648     QDECREF(interval_list);
649     return blk;
650 
651 early_err:
652     qemu_opts_del(opts);
653     QDECREF(interval_dict);
654     QDECREF(interval_list);
655 err_no_opts:
656     QDECREF(bs_opts);
657     return NULL;
658 }
659 
660 static QemuOptsList qemu_root_bds_opts;
661 
662 /* Takes the ownership of bs_opts */
663 static BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
664 {
665     BlockDriverState *bs;
666     QemuOpts *opts;
667     Error *local_error = NULL;
668     BlockdevDetectZeroesOptions detect_zeroes;
669     int ret;
670     int bdrv_flags = 0;
671 
672     opts = qemu_opts_create(&qemu_root_bds_opts, NULL, 1, errp);
673     if (!opts) {
674         goto fail;
675     }
676 
677     qemu_opts_absorb_qdict(opts, bs_opts, &local_error);
678     if (local_error) {
679         error_propagate(errp, local_error);
680         goto fail;
681     }
682 
683     extract_common_blockdev_options(opts, &bdrv_flags, NULL, NULL,
684                                     &detect_zeroes, &local_error);
685     if (local_error) {
686         error_propagate(errp, local_error);
687         goto fail;
688     }
689 
690     bs = NULL;
691     ret = bdrv_open(&bs, NULL, NULL, bs_opts, bdrv_flags, errp);
692     if (ret < 0) {
693         goto fail_no_bs_opts;
694     }
695 
696     bs->detect_zeroes = detect_zeroes;
697 
698 fail_no_bs_opts:
699     qemu_opts_del(opts);
700     return bs;
701 
702 fail:
703     qemu_opts_del(opts);
704     QDECREF(bs_opts);
705     return NULL;
706 }
707 
708 void blockdev_close_all_bdrv_states(void)
709 {
710     BlockDriverState *bs, *next_bs;
711 
712     QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) {
713         AioContext *ctx = bdrv_get_aio_context(bs);
714 
715         aio_context_acquire(ctx);
716         bdrv_unref(bs);
717         aio_context_release(ctx);
718     }
719 }
720 
721 static void qemu_opt_rename(QemuOpts *opts, const char *from, const char *to,
722                             Error **errp)
723 {
724     const char *value;
725 
726     value = qemu_opt_get(opts, from);
727     if (value) {
728         if (qemu_opt_find(opts, to)) {
729             error_setg(errp, "'%s' and its alias '%s' can't be used at the "
730                        "same time", to, from);
731             return;
732         }
733     }
734 
735     /* rename all items in opts */
736     while ((value = qemu_opt_get(opts, from))) {
737         qemu_opt_set(opts, to, value, &error_abort);
738         qemu_opt_unset(opts, from);
739     }
740 }
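
/*
 * Illustration: drive_new() below uses this helper to map legacy option
 * names to their QMP spellings, e.g. "readonly=on" becomes "read-only=on".
 * Giving both the old and the new name at the same time is an error.
 */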
741 
742 QemuOptsList qemu_legacy_drive_opts = {
743     .name = "drive",
744     .head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head),
745     .desc = {
746         {
747             .name = "bus",
748             .type = QEMU_OPT_NUMBER,
749             .help = "bus number",
750         },{
751             .name = "unit",
752             .type = QEMU_OPT_NUMBER,
753             .help = "unit number (i.e. lun for scsi)",
754         },{
755             .name = "index",
756             .type = QEMU_OPT_NUMBER,
757             .help = "index number",
758         },{
759             .name = "media",
760             .type = QEMU_OPT_STRING,
761             .help = "media type (disk, cdrom)",
762         },{
763             .name = "if",
764             .type = QEMU_OPT_STRING,
765             .help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)",
766         },{
767             .name = "cyls",
768             .type = QEMU_OPT_NUMBER,
769             .help = "number of cylinders (ide disk geometry)",
770         },{
771             .name = "heads",
772             .type = QEMU_OPT_NUMBER,
773             .help = "number of heads (ide disk geometry)",
774         },{
775             .name = "secs",
776             .type = QEMU_OPT_NUMBER,
777             .help = "number of sectors (ide disk geometry)",
778         },{
779             .name = "trans",
780             .type = QEMU_OPT_STRING,
781             .help = "chs translation (auto, lba, none)",
782         },{
783             .name = "boot",
784             .type = QEMU_OPT_BOOL,
785             .help = "(deprecated, ignored)",
786         },{
787             .name = "addr",
788             .type = QEMU_OPT_STRING,
789             .help = "pci address (virtio only)",
790         },{
791             .name = "serial",
792             .type = QEMU_OPT_STRING,
793             .help = "disk serial number",
794         },{
795             .name = "file",
796             .type = QEMU_OPT_STRING,
797             .help = "file name",
798         },
799 
800         /* Options that are passed on, but have special semantics with -drive */
801         {
802             .name = "read-only",
803             .type = QEMU_OPT_BOOL,
804             .help = "open drive file as read-only",
805         },{
806             .name = "rerror",
807             .type = QEMU_OPT_STRING,
808             .help = "read error action",
809         },{
810             .name = "werror",
811             .type = QEMU_OPT_STRING,
812             .help = "write error action",
813         },{
814             .name = "copy-on-read",
815             .type = QEMU_OPT_BOOL,
816             .help = "copy read data from backing file into image file",
817         },
818 
819         { /* end of list */ }
820     },
821 };
822 
823 DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
824 {
825     const char *value;
826     BlockBackend *blk;
827     DriveInfo *dinfo = NULL;
828     QDict *bs_opts;
829     QemuOpts *legacy_opts;
830     DriveMediaType media = MEDIA_DISK;
831     BlockInterfaceType type;
832     int cyls, heads, secs, translation;
833     int max_devs, bus_id, unit_id, index;
834     const char *devaddr;
835     const char *werror, *rerror;
836     bool read_only = false;
837     bool copy_on_read;
838     const char *serial;
839     const char *filename;
840     Error *local_err = NULL;
841     int i;
842 
843     /* Change legacy command line options into QMP ones */
844     static const struct {
845         const char *from;
846         const char *to;
847     } opt_renames[] = {
848         { "iops",           "throttling.iops-total" },
849         { "iops_rd",        "throttling.iops-read" },
850         { "iops_wr",        "throttling.iops-write" },
851 
852         { "bps",            "throttling.bps-total" },
853         { "bps_rd",         "throttling.bps-read" },
854         { "bps_wr",         "throttling.bps-write" },
855 
856         { "iops_max",       "throttling.iops-total-max" },
857         { "iops_rd_max",    "throttling.iops-read-max" },
858         { "iops_wr_max",    "throttling.iops-write-max" },
859 
860         { "bps_max",        "throttling.bps-total-max" },
861         { "bps_rd_max",     "throttling.bps-read-max" },
862         { "bps_wr_max",     "throttling.bps-write-max" },
863 
864         { "iops_size",      "throttling.iops-size" },
865 
866         { "group",          "throttling.group" },
867 
868         { "readonly",       "read-only" },
869     };
870 
871     for (i = 0; i < ARRAY_SIZE(opt_renames); i++) {
872         qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to,
873                         &local_err);
874         if (local_err) {
875             error_report_err(local_err);
876             return NULL;
877         }
878     }
879 
880     value = qemu_opt_get(all_opts, "cache");
881     if (value) {
882         int flags = 0;
883 
884         if (bdrv_parse_cache_flags(value, &flags) != 0) {
885             error_report("invalid cache option");
886             return NULL;
887         }
888 
889         /* Specific options take precedence */
890         if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) {
891             qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB,
892                               !!(flags & BDRV_O_CACHE_WB), &error_abort);
893         }
894         if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) {
895             qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT,
896                               !!(flags & BDRV_O_NOCACHE), &error_abort);
897         }
898         if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) {
899             qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH,
900                               !!(flags & BDRV_O_NO_FLUSH), &error_abort);
901         }
902         qemu_opt_unset(all_opts, "cache");
903     }
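
    /*
     * Illustration (hypothetical value): "cache=none" expands to
     * cache.writeback=on, cache.direct=on and cache.no-flush=off, except for
     * any of those options that was already given explicitly.
     */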
904 
905     /* Get a QDict for processing the options */
906     bs_opts = qdict_new();
907     qemu_opts_to_qdict(all_opts, bs_opts);
908 
909     legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0,
910                                    &error_abort);
911     qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err);
912     if (local_err) {
913         error_report_err(local_err);
914         goto fail;
915     }
916 
917     /* Deprecated option boot=[on|off] */
918     if (qemu_opt_get(legacy_opts, "boot") != NULL) {
919         fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be "
920                 "ignored. Future versions will reject this parameter. Please "
921                 "update your scripts.\n");
922     }
923 
924     /* Media type */
925     value = qemu_opt_get(legacy_opts, "media");
926     if (value) {
927         if (!strcmp(value, "disk")) {
928             media = MEDIA_DISK;
929         } else if (!strcmp(value, "cdrom")) {
930             media = MEDIA_CDROM;
931             read_only = true;
932         } else {
933             error_report("'%s' invalid media", value);
934             goto fail;
935         }
936     }
937 
938     /* copy-on-read is disabled with a warning for read-only devices */
939     read_only |= qemu_opt_get_bool(legacy_opts, "read-only", false);
940     copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false);
941 
942     if (read_only && copy_on_read) {
943         error_report("warning: disabling copy-on-read on read-only drive");
944         copy_on_read = false;
945     }
946 
947     qdict_put(bs_opts, "read-only",
948               qstring_from_str(read_only ? "on" : "off"));
949     qdict_put(bs_opts, "copy-on-read",
950               qstring_from_str(copy_on_read ? "on" :"off"));
951 
952     /* Controller type */
953     value = qemu_opt_get(legacy_opts, "if");
954     if (value) {
955         for (type = 0;
956              type < IF_COUNT && strcmp(value, if_name[type]);
957              type++) {
958         }
959         if (type == IF_COUNT) {
960             error_report("unsupported bus type '%s'", value);
961             goto fail;
962         }
963     } else {
964         type = block_default_type;
965     }
966 
967     /* Geometry */
968     cyls  = qemu_opt_get_number(legacy_opts, "cyls", 0);
969     heads = qemu_opt_get_number(legacy_opts, "heads", 0);
970     secs  = qemu_opt_get_number(legacy_opts, "secs", 0);
971 
972     if (cyls || heads || secs) {
973         if (cyls < 1) {
974             error_report("invalid physical cyls number");
975             goto fail;
976         }
977         if (heads < 1) {
978             error_report("invalid physical heads number");
979             goto fail;
980         }
981         if (secs < 1) {
982             error_report("invalid physical secs number");
983             goto fail;
984         }
985     }
986 
987     translation = BIOS_ATA_TRANSLATION_AUTO;
988     value = qemu_opt_get(legacy_opts, "trans");
989     if (value != NULL) {
990         if (!cyls) {
991             error_report("'%s' trans must be used with cyls, heads and secs",
992                          value);
993             goto fail;
994         }
995         if (!strcmp(value, "none")) {
996             translation = BIOS_ATA_TRANSLATION_NONE;
997         } else if (!strcmp(value, "lba")) {
998             translation = BIOS_ATA_TRANSLATION_LBA;
999         } else if (!strcmp(value, "large")) {
1000             translation = BIOS_ATA_TRANSLATION_LARGE;
1001         } else if (!strcmp(value, "rechs")) {
1002             translation = BIOS_ATA_TRANSLATION_RECHS;
1003         } else if (!strcmp(value, "auto")) {
1004             translation = BIOS_ATA_TRANSLATION_AUTO;
1005         } else {
1006             error_report("'%s' invalid translation type", value);
1007             goto fail;
1008         }
1009     }
1010 
1011     if (media == MEDIA_CDROM) {
1012         if (cyls || secs || heads) {
1013             error_report("CHS can't be set with media=cdrom");
1014             goto fail;
1015         }
1016     }
1017 
1018     /* Device address specified by bus/unit or index.
1019      * If none was specified, try to find the first free one. */
1020     bus_id  = qemu_opt_get_number(legacy_opts, "bus", 0);
1021     unit_id = qemu_opt_get_number(legacy_opts, "unit", -1);
1022     index   = qemu_opt_get_number(legacy_opts, "index", -1);
1023 
1024     max_devs = if_max_devs[type];
1025 
1026     if (index != -1) {
1027         if (bus_id != 0 || unit_id != -1) {
1028             error_report("index cannot be used with bus and unit");
1029             goto fail;
1030         }
1031         bus_id = drive_index_to_bus_id(type, index);
1032         unit_id = drive_index_to_unit_id(type, index);
1033     }
1034 
1035     if (unit_id == -1) {
1036        unit_id = 0;
1037        while (drive_get(type, bus_id, unit_id) != NULL) {
1038            unit_id++;
1039            if (max_devs && unit_id >= max_devs) {
1040                unit_id -= max_devs;
1041                bus_id++;
1042            }
1043        }
1044     }
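
    /*
     * Illustration (hypothetical layout): with if=ide (max_devs == 2) and
     * units 0 and 1 of bus 0 already taken, the loop above wraps unit_id
     * back to 0 and advances bus_id, so the new drive lands on bus=1,unit=0.
     */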
1045 
1046     if (max_devs && unit_id >= max_devs) {
1047         error_report("unit %d too big (max is %d)", unit_id, max_devs - 1);
1048         goto fail;
1049     }
1050 
1051     if (drive_get(type, bus_id, unit_id) != NULL) {
1052         error_report("drive with bus=%d, unit=%d (index=%d) exists",
1053                      bus_id, unit_id, index);
1054         goto fail;
1055     }
1056 
1057     /* Serial number */
1058     serial = qemu_opt_get(legacy_opts, "serial");
1059 
1060     /* no id supplied -> create one */
1061     if (qemu_opts_id(all_opts) == NULL) {
1062         char *new_id;
1063         const char *mediastr = "";
1064         if (type == IF_IDE || type == IF_SCSI) {
1065             mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
1066         }
1067         if (max_devs) {
1068             new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id,
1069                                      mediastr, unit_id);
1070         } else {
1071             new_id = g_strdup_printf("%s%s%i", if_name[type],
1072                                      mediastr, unit_id);
1073         }
1074         qdict_put(bs_opts, "id", qstring_from_str(new_id));
1075         g_free(new_id);
1076     }
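
    /*
     * Illustration (hypothetical values): an if=ide CD-ROM at bus 1, unit 0
     * gets the generated id "ide1-cd0"; an if=virtio disk (max_devs == 0) at
     * unit 2 gets "virtio2".
     */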
1077 
1078     /* Add virtio block device */
1079     devaddr = qemu_opt_get(legacy_opts, "addr");
1080     if (devaddr && type != IF_VIRTIO) {
1081         error_report("addr is not supported by this bus type");
1082         goto fail;
1083     }
1084 
1085     if (type == IF_VIRTIO) {
1086         QemuOpts *devopts;
1087         devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
1088                                    &error_abort);
1089         if (arch_type == QEMU_ARCH_S390X) {
1090             qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort);
1091         } else {
1092             qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort);
1093         }
1094         qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
1095                      &error_abort);
1096         if (devaddr) {
1097             qemu_opt_set(devopts, "addr", devaddr, &error_abort);
1098         }
1099     }
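
    /*
     * The block above is roughly what the user would get by adding
     *     -device virtio-blk-pci,drive=<id>[,addr=<addr>]
     * (virtio-blk-ccw on s390x) next to the -drive option.
     */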
1100 
1101     filename = qemu_opt_get(legacy_opts, "file");
1102 
1103     /* Check werror/rerror compatibility with if=... */
1104     werror = qemu_opt_get(legacy_opts, "werror");
1105     if (werror != NULL) {
1106         if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
1107             type != IF_NONE) {
1108             error_report("werror is not supported by this bus type");
1109             goto fail;
1110         }
1111         qdict_put(bs_opts, "werror", qstring_from_str(werror));
1112     }
1113 
1114     rerror = qemu_opt_get(legacy_opts, "rerror");
1115     if (rerror != NULL) {
1116         if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
1117             type != IF_NONE) {
1118             error_report("rerror is not supported by this bus type");
1119             goto fail;
1120         }
1121         qdict_put(bs_opts, "rerror", qstring_from_str(rerror));
1122     }
1123 
1124     /* Actual block device init: Functionality shared with blockdev-add */
1125     blk = blockdev_init(filename, bs_opts, &local_err);
1126     bs_opts = NULL;
1127     if (!blk) {
1128         if (local_err) {
1129             error_report_err(local_err);
1130         }
1131         goto fail;
1132     } else {
1133         assert(!local_err);
1134     }
1135 
1136     /* Create legacy DriveInfo */
1137     dinfo = g_malloc0(sizeof(*dinfo));
1138     dinfo->opts = all_opts;
1139 
1140     dinfo->cyls = cyls;
1141     dinfo->heads = heads;
1142     dinfo->secs = secs;
1143     dinfo->trans = translation;
1144 
1145     dinfo->type = type;
1146     dinfo->bus = bus_id;
1147     dinfo->unit = unit_id;
1148     dinfo->devaddr = devaddr;
1149     dinfo->serial = g_strdup(serial);
1150 
1151     blk_set_legacy_dinfo(blk, dinfo);
1152 
1153     switch(type) {
1154     case IF_IDE:
1155     case IF_SCSI:
1156     case IF_XEN:
1157     case IF_NONE:
1158         dinfo->media_cd = media == MEDIA_CDROM;
1159         break;
1160     default:
1161         break;
1162     }
1163 
1164 fail:
1165     qemu_opts_del(legacy_opts);
1166     QDECREF(bs_opts);
1167     return dinfo;
1168 }
1169 
1170 void hmp_commit(Monitor *mon, const QDict *qdict)
1171 {
1172     const char *device = qdict_get_str(qdict, "device");
1173     BlockBackend *blk;
1174     int ret;
1175 
1176     if (!strcmp(device, "all")) {
1177         ret = bdrv_commit_all();
1178     } else {
1179         BlockDriverState *bs;
1180         AioContext *aio_context;
1181 
1182         blk = blk_by_name(device);
1183         if (!blk) {
1184             monitor_printf(mon, "Device '%s' not found\n", device);
1185             return;
1186         }
1187         if (!blk_is_available(blk)) {
1188             monitor_printf(mon, "Device '%s' has no medium\n", device);
1189             return;
1190         }
1191 
1192         bs = blk_bs(blk);
1193         aio_context = bdrv_get_aio_context(bs);
1194         aio_context_acquire(aio_context);
1195 
1196         ret = bdrv_commit(bs);
1197 
1198         aio_context_release(aio_context);
1199     }
1200     if (ret < 0) {
1201         monitor_printf(mon, "'commit' error for '%s': %s\n", device,
1202                        strerror(-ret));
1203     }
1204 }
1205 
1206 static void blockdev_do_action(TransactionActionKind type, void *data,
1207                                Error **errp)
1208 {
1209     TransactionAction action;
1210     TransactionActionList list;
1211 
1212     action.type = type;
1213     action.u.data = data;
1214     list.value = &action;
1215     list.next = NULL;
1216     qmp_transaction(&list, false, NULL, errp);
1217 }
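
/*
 * Illustration: the QMP handlers below each wrap a single action in a
 * one-element list, so e.g. blockdev-snapshot-sync behaves like a
 * 'transaction' command containing just that action.
 */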
1218 
1219 void qmp_blockdev_snapshot_sync(bool has_device, const char *device,
1220                                 bool has_node_name, const char *node_name,
1221                                 const char *snapshot_file,
1222                                 bool has_snapshot_node_name,
1223                                 const char *snapshot_node_name,
1224                                 bool has_format, const char *format,
1225                                 bool has_mode, NewImageMode mode, Error **errp)
1226 {
1227     BlockdevSnapshotSync snapshot = {
1228         .has_device = has_device,
1229         .device = (char *) device,
1230         .has_node_name = has_node_name,
1231         .node_name = (char *) node_name,
1232         .snapshot_file = (char *) snapshot_file,
1233         .has_snapshot_node_name = has_snapshot_node_name,
1234         .snapshot_node_name = (char *) snapshot_node_name,
1235         .has_format = has_format,
1236         .format = (char *) format,
1237         .has_mode = has_mode,
1238         .mode = mode,
1239     };
1240     blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC,
1241                        &snapshot, errp);
1242 }
1243 
1244 void qmp_blockdev_snapshot(const char *node, const char *overlay,
1245                            Error **errp)
1246 {
1247     BlockdevSnapshot snapshot_data = {
1248         .node = (char *) node,
1249         .overlay = (char *) overlay
1250     };
1251 
1252     blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT,
1253                        &snapshot_data, errp);
1254 }
1255 
1256 void qmp_blockdev_snapshot_internal_sync(const char *device,
1257                                          const char *name,
1258                                          Error **errp)
1259 {
1260     BlockdevSnapshotInternal snapshot = {
1261         .device = (char *) device,
1262         .name = (char *) name
1263     };
1264 
1265     blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
1266                        &snapshot, errp);
1267 }
1268 
1269 SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
1270                                                          bool has_id,
1271                                                          const char *id,
1272                                                          bool has_name,
1273                                                          const char *name,
1274                                                          Error **errp)
1275 {
1276     BlockDriverState *bs;
1277     BlockBackend *blk;
1278     AioContext *aio_context;
1279     QEMUSnapshotInfo sn;
1280     Error *local_err = NULL;
1281     SnapshotInfo *info = NULL;
1282     int ret;
1283 
1284     blk = blk_by_name(device);
1285     if (!blk) {
1286         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1287                   "Device '%s' not found", device);
1288         return NULL;
1289     }
1290 
1291     aio_context = blk_get_aio_context(blk);
1292     aio_context_acquire(aio_context);
1293 
1294     if (!has_id) {
1295         id = NULL;
1296     }
1297 
1298     if (!has_name) {
1299         name = NULL;
1300     }
1301 
1302     if (!id && !name) {
1303         error_setg(errp, "Name or id must be provided");
1304         goto out_aio_context;
1305     }
1306 
1307     if (!blk_is_available(blk)) {
1308         error_setg(errp, "Device '%s' has no medium", device);
1309         goto out_aio_context;
1310     }
1311     bs = blk_bs(blk);
1312 
1313     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
1314         goto out_aio_context;
1315     }
1316 
1317     ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
1318     if (local_err) {
1319         error_propagate(errp, local_err);
1320         goto out_aio_context;
1321     }
1322     if (!ret) {
1323         error_setg(errp,
1324                    "Snapshot with id '%s' and name '%s' does not exist on "
1325                    "device '%s'",
1326                    STR_OR_NULL(id), STR_OR_NULL(name), device);
1327         goto out_aio_context;
1328     }
1329 
1330     bdrv_snapshot_delete(bs, id, name, &local_err);
1331     if (local_err) {
1332         error_propagate(errp, local_err);
1333         goto out_aio_context;
1334     }
1335 
1336     aio_context_release(aio_context);
1337 
1338     info = g_new0(SnapshotInfo, 1);
1339     info->id = g_strdup(sn.id_str);
1340     info->name = g_strdup(sn.name);
1341     info->date_nsec = sn.date_nsec;
1342     info->date_sec = sn.date_sec;
1343     info->vm_state_size = sn.vm_state_size;
1344     info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000;
1345     info->vm_clock_sec = sn.vm_clock_nsec / 1000000000;
1346 
1347     return info;
1348 
1349 out_aio_context:
1350     aio_context_release(aio_context);
1351     return NULL;
1352 }
1353 
1354 /**
1355  * block_dirty_bitmap_lookup:
1356  * Return a dirty bitmap (if present), after validating
1357  * the node reference and bitmap names.
1358  *
1359  * @node: The name of the BDS node to search for bitmaps
1360  * @name: The name of the bitmap to search for
1361  * @pbs: Output pointer for BDS lookup, if desired. Can be NULL.
1362  * @paio: Output pointer for aio_context acquisition, if desired. Can be NULL.
1363  * @errp: Output pointer for error information. Can be NULL.
1364  *
1365  * @return: A bitmap object on success, or NULL on failure.
1366  */
1367 static BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
1368                                                   const char *name,
1369                                                   BlockDriverState **pbs,
1370                                                   AioContext **paio,
1371                                                   Error **errp)
1372 {
1373     BlockDriverState *bs;
1374     BdrvDirtyBitmap *bitmap;
1375     AioContext *aio_context;
1376 
1377     if (!node) {
1378         error_setg(errp, "Node cannot be NULL");
1379         return NULL;
1380     }
1381     if (!name) {
1382         error_setg(errp, "Bitmap name cannot be NULL");
1383         return NULL;
1384     }
1385     bs = bdrv_lookup_bs(node, node, NULL);
1386     if (!bs) {
1387         error_setg(errp, "Node '%s' not found", node);
1388         return NULL;
1389     }
1390 
1391     aio_context = bdrv_get_aio_context(bs);
1392     aio_context_acquire(aio_context);
1393 
1394     bitmap = bdrv_find_dirty_bitmap(bs, name);
1395     if (!bitmap) {
1396         error_setg(errp, "Dirty bitmap '%s' not found", name);
1397         goto fail;
1398     }
1399 
1400     if (pbs) {
1401         *pbs = bs;
1402     }
1403     if (paio) {
1404         *paio = aio_context;
1405     } else {
1406         aio_context_release(aio_context);
1407     }
1408 
1409     return bitmap;
1410 
1411  fail:
1412     aio_context_release(aio_context);
1413     return NULL;
1414 }
1415 
1416 /* New and old BlockDriverState structs for atomic group operations */
1417 
1418 typedef struct BlkActionState BlkActionState;
1419 
1420 /**
1421  * BlkActionOps:
1422  * Table of operations that define an Action.
1423  *
1424  * @instance_size: Size of state struct, in bytes.
1425  * @prepare: Prepare the work, must NOT be NULL.
1426  * @commit: Commit the changes, can be NULL.
1427  * @abort: Abort the changes on fail, can be NULL.
1428  * @clean: Clean up resources after all transaction actions have called
1429  *         commit() or abort(). Can be NULL.
1430  *
1431  * Only prepare() may fail. In a single transaction, only one of commit() or
1432  * abort() will be called. clean() will always be called if it is present.
1433  */
1434 typedef struct BlkActionOps {
1435     size_t instance_size;
1436     void (*prepare)(BlkActionState *common, Error **errp);
1437     void (*commit)(BlkActionState *common);
1438     void (*abort)(BlkActionState *common);
1439     void (*clean)(BlkActionState *common);
1440 } BlkActionOps;
1441 
1442 /**
1443  * BlkActionState:
1444  * Describes one Action's state within a Transaction.
1445  *
1446  * @action: QAPI-defined enum identifying which Action to perform.
1447  * @ops: Table of ActionOps this Action can perform.
1448  * @block_job_txn: Transaction which this action belongs to.
1449  * @entry: List membership for all Actions in this Transaction.
1450  *
1451  * This structure must be the first member of a subclassed type, on the
1452  * assumption that the compiler lays out the subclass with the same
1453  * offsets as the base class.
1454  */
1455 struct BlkActionState {
1456     TransactionAction *action;
1457     const BlkActionOps *ops;
1458     BlockJobTxn *block_job_txn;
1459     TransactionProperties *txn_props;
1460     QSIMPLEQ_ENTRY(BlkActionState) entry;
1461 };
1462 
1463 /* internal snapshot private data */
1464 typedef struct InternalSnapshotState {
1465     BlkActionState common;
1466     BlockDriverState *bs;
1467     AioContext *aio_context;
1468     QEMUSnapshotInfo sn;
1469     bool created;
1470 } InternalSnapshotState;
1471 
1472 
1473 static int action_check_completion_mode(BlkActionState *s, Error **errp)
1474 {
1475     if (s->txn_props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
1476         error_setg(errp,
1477                    "Action '%s' does not support Transaction property "
1478                    "completion-mode = %s",
1479                    TransactionActionKind_lookup[s->action->type],
1480                    ActionCompletionMode_lookup[s->txn_props->completion_mode]);
1481         return -1;
1482     }
1483     return 0;
1484 }
1485 
1486 static void internal_snapshot_prepare(BlkActionState *common,
1487                                       Error **errp)
1488 {
1489     Error *local_err = NULL;
1490     const char *device;
1491     const char *name;
1492     BlockBackend *blk;
1493     BlockDriverState *bs;
1494     QEMUSnapshotInfo old_sn, *sn;
1495     bool ret;
1496     qemu_timeval tv;
1497     BlockdevSnapshotInternal *internal;
1498     InternalSnapshotState *state;
1499     int ret1;
1500 
1501     g_assert(common->action->type ==
1502              TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC);
1503     internal = common->action->u.blockdev_snapshot_internal_sync;
1504     state = DO_UPCAST(InternalSnapshotState, common, common);
1505 
1506     /* 1. parse input */
1507     device = internal->device;
1508     name = internal->name;
1509 
1510     /* 2. validate the request */
1511     if (action_check_completion_mode(common, errp) < 0) {
1512         return;
1513     }
1514 
1515     blk = blk_by_name(device);
1516     if (!blk) {
1517         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1518                   "Device '%s' not found", device);
1519         return;
1520     }
1521 
1522     /* AioContext is released in .clean() */
1523     state->aio_context = blk_get_aio_context(blk);
1524     aio_context_acquire(state->aio_context);
1525 
1526     if (!blk_is_available(blk)) {
1527         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
1528         return;
1529     }
1530     bs = blk_bs(blk);
1531 
1532     state->bs = bs;
1533     bdrv_drained_begin(bs);
1534 
1535     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
1536         return;
1537     }
1538 
1539     if (bdrv_is_read_only(bs)) {
1540         error_setg(errp, "Device '%s' is read only", device);
1541         return;
1542     }
1543 
1544     if (!bdrv_can_snapshot(bs)) {
1545         error_setg(errp, "Block format '%s' used by device '%s' "
1546                    "does not support internal snapshots",
1547                    bs->drv->format_name, device);
1548         return;
1549     }
1550 
1551     if (!strlen(name)) {
1552         error_setg(errp, "Name is empty");
1553         return;
1554     }
1555 
1556     /* check whether a snapshot with the same name already exists */
1557     ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
1558                                             &local_err);
1559     if (local_err) {
1560         error_propagate(errp, local_err);
1561         return;
1562     } else if (ret) {
1563         error_setg(errp,
1564                    "Snapshot with name '%s' already exists on device '%s'",
1565                    name, device);
1566         return;
1567     }
1568 
1569     /* 3. take the snapshot */
1570     sn = &state->sn;
1571     pstrcpy(sn->name, sizeof(sn->name), name);
1572     qemu_gettimeofday(&tv);
1573     sn->date_sec = tv.tv_sec;
1574     sn->date_nsec = tv.tv_usec * 1000;
1575     sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1576 
1577     ret1 = bdrv_snapshot_create(bs, sn);
1578     if (ret1 < 0) {
1579         error_setg_errno(errp, -ret1,
1580                          "Failed to create snapshot '%s' on device '%s'",
1581                          name, device);
1582         return;
1583     }
1584 
1585     /* 4. success: remember that the snapshot was created */
1586     state->created = true;
1587 }
1588 
1589 static void internal_snapshot_abort(BlkActionState *common)
1590 {
1591     InternalSnapshotState *state =
1592                              DO_UPCAST(InternalSnapshotState, common, common);
1593     BlockDriverState *bs = state->bs;
1594     QEMUSnapshotInfo *sn = &state->sn;
1595     Error *local_error = NULL;
1596 
1597     if (!state->created) {
1598         return;
1599     }
1600 
1601     if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
1602         error_reportf_err(local_error,
1603                           "Failed to delete snapshot with id '%s' and "
1604                           "name '%s' on device '%s' in abort: ",
1605                           sn->id_str, sn->name,
1606                           bdrv_get_device_name(bs));
1607     }
1608 }
1609 
1610 static void internal_snapshot_clean(BlkActionState *common)
1611 {
1612     InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
1613                                              common, common);
1614 
1615     if (state->aio_context) {
1616         if (state->bs) {
1617             bdrv_drained_end(state->bs);
1618         }
1619         aio_context_release(state->aio_context);
1620     }
1621 }
1622 
1623 /* external snapshot private data */
1624 typedef struct ExternalSnapshotState {
1625     BlkActionState common;
1626     BlockDriverState *old_bs;
1627     BlockDriverState *new_bs;
1628     AioContext *aio_context;
1629 } ExternalSnapshotState;
1630 
1631 static void external_snapshot_prepare(BlkActionState *common,
1632                                       Error **errp)
1633 {
1634     int flags = 0, ret;
1635     QDict *options = NULL;
1636     Error *local_err = NULL;
1637     /* Device and node name of the image to generate the snapshot from */
1638     const char *device;
1639     const char *node_name;
1640     /* Reference to the new image (for 'blockdev-snapshot') */
1641     const char *snapshot_ref;
1642     /* File name of the new image (for 'blockdev-snapshot-sync') */
1643     const char *new_image_file;
1644     ExternalSnapshotState *state =
1645                              DO_UPCAST(ExternalSnapshotState, common, common);
1646     TransactionAction *action = common->action;
1647 
1648     /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
1649      * purpose but a different set of parameters */
1650     switch (action->type) {
1651     case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
1652         {
1653             BlockdevSnapshot *s = action->u.blockdev_snapshot;
1654             device = s->node;
1655             node_name = s->node;
1656             new_image_file = NULL;
1657             snapshot_ref = s->overlay;
1658         }
1659         break;
1660     case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
1661         {
1662             BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync;
1663             device = s->has_device ? s->device : NULL;
1664             node_name = s->has_node_name ? s->node_name : NULL;
1665             new_image_file = s->snapshot_file;
1666             snapshot_ref = NULL;
1667         }
1668         break;
1669     default:
1670         g_assert_not_reached();
1671     }
1672 
1673     /* start processing */
1674     if (action_check_completion_mode(common, errp) < 0) {
1675         return;
1676     }
1677 
1678     state->old_bs = bdrv_lookup_bs(device, node_name, errp);
1679     if (!state->old_bs) {
1680         return;
1681     }
1682 
1683     /* Acquire AioContext now so any threads operating on old_bs stop */
1684     state->aio_context = bdrv_get_aio_context(state->old_bs);
1685     aio_context_acquire(state->aio_context);
1686     bdrv_drained_begin(state->old_bs);
1687 
1688     if (!bdrv_is_inserted(state->old_bs)) {
1689         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
1690         return;
1691     }
1692 
1693     if (bdrv_op_is_blocked(state->old_bs,
1694                            BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
1695         return;
1696     }
1697 
1698     if (!bdrv_is_read_only(state->old_bs)) {
1699         if (bdrv_flush(state->old_bs)) {
1700             error_setg(errp, QERR_IO_ERROR);
1701             return;
1702         }
1703     }
1704 
1705     if (!bdrv_is_first_non_filter(state->old_bs)) {
1706         error_setg(errp, QERR_FEATURE_DISABLED, "snapshot");
1707         return;
1708     }
1709 
1710     if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
1711         BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync;
1712         const char *format = s->has_format ? s->format : "qcow2";
1713         enum NewImageMode mode;
1714         const char *snapshot_node_name =
1715             s->has_snapshot_node_name ? s->snapshot_node_name : NULL;
1716 
1717         if (node_name && !snapshot_node_name) {
1718             error_setg(errp, "New snapshot node name missing");
1719             return;
1720         }
1721 
1722         if (snapshot_node_name &&
1723             bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
1724             error_setg(errp, "New snapshot node name already in use");
1725             return;
1726         }
1727 
1728         flags = state->old_bs->open_flags;
1729 
1730         /* create the new image with a backing file */
1731         mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
1732         if (mode != NEW_IMAGE_MODE_EXISTING) {
1733             bdrv_img_create(new_image_file, format,
1734                             state->old_bs->filename,
1735                             state->old_bs->drv->format_name,
1736                             NULL, -1, flags, &local_err, false);
1737             if (local_err) {
1738                 error_propagate(errp, local_err);
1739                 return;
1740             }
1741         }
1742 
1743         options = qdict_new();
1744         if (s->has_snapshot_node_name) {
1745             qdict_put(options, "node-name",
1746                       qstring_from_str(snapshot_node_name));
1747         }
1748         qdict_put(options, "driver", qstring_from_str(format));
1749 
1750         flags |= BDRV_O_NO_BACKING;
1751     }
1752 
1753     assert(state->new_bs == NULL);
1754     ret = bdrv_open(&state->new_bs, new_image_file, snapshot_ref, options,
1755                     flags, errp);
1756     /* We will manually add the backing_hd field to the bs later */
1757     if (ret != 0) {
1758         return;
1759     }
1760 
1761     if (state->new_bs->blk != NULL) {
1762         error_setg(errp, "The snapshot is already in use by %s",
1763                    blk_name(state->new_bs->blk));
1764         return;
1765     }
1766 
1767     if (bdrv_op_is_blocked(state->new_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
1768                            errp)) {
1769         return;
1770     }
1771 
1772     if (state->new_bs->backing != NULL) {
1773         error_setg(errp, "The snapshot already has a backing image");
1774         return;
1775     }
1776 
1777     if (!state->new_bs->drv->supports_backing) {
1778         error_setg(errp, "The snapshot does not support backing images");
1779     }
1780 }
1781 
1782 static void external_snapshot_commit(BlkActionState *common)
1783 {
1784     ExternalSnapshotState *state =
1785                              DO_UPCAST(ExternalSnapshotState, common, common);
1786 
1787     bdrv_set_aio_context(state->new_bs, state->aio_context);
1788 
1789     /* This removes our old bs and adds the new bs */
1790     bdrv_append(state->new_bs, state->old_bs);
1791     /* We don't need (or want) to use the transactional
1792      * bdrv_reopen_multiple() across all the entries at once, because we
1793      * don't want to abort all of them if one of them fails the reopen */
1794     bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR,
1795                 NULL);
1796 }
1797 
1798 static void external_snapshot_abort(BlkActionState *common)
1799 {
1800     ExternalSnapshotState *state =
1801                              DO_UPCAST(ExternalSnapshotState, common, common);
1802     if (state->new_bs) {
1803         bdrv_unref(state->new_bs);
1804     }
1805 }
1806 
1807 static void external_snapshot_clean(BlkActionState *common)
1808 {
1809     ExternalSnapshotState *state =
1810                              DO_UPCAST(ExternalSnapshotState, common, common);
1811     if (state->aio_context) {
1812         bdrv_drained_end(state->old_bs);
1813         aio_context_release(state->aio_context);
1814     }
1815 }
1816 
1817 typedef struct DriveBackupState {
1818     BlkActionState common;
1819     BlockDriverState *bs;
1820     AioContext *aio_context;
1821     BlockJob *job;
1822 } DriveBackupState;
1823 
1824 static void do_drive_backup(const char *device, const char *target,
1825                             bool has_format, const char *format,
1826                             enum MirrorSyncMode sync,
1827                             bool has_mode, enum NewImageMode mode,
1828                             bool has_speed, int64_t speed,
1829                             bool has_bitmap, const char *bitmap,
1830                             bool has_on_source_error,
1831                             BlockdevOnError on_source_error,
1832                             bool has_on_target_error,
1833                             BlockdevOnError on_target_error,
1834                             BlockJobTxn *txn, Error **errp);
1835 
1836 static void drive_backup_prepare(BlkActionState *common, Error **errp)
1837 {
1838     DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1839     BlockBackend *blk;
1840     DriveBackup *backup;
1841     Error *local_err = NULL;
1842 
1843     assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
1844     backup = common->action->u.drive_backup;
1845 
1846     blk = blk_by_name(backup->device);
1847     if (!blk) {
1848         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1849                   "Device '%s' not found", backup->device);
1850         return;
1851     }
1852 
1853     if (!blk_is_available(blk)) {
1854         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
1855         return;
1856     }
1857 
1858     /* AioContext is released in .clean() */
1859     state->aio_context = blk_get_aio_context(blk);
1860     aio_context_acquire(state->aio_context);
1861     bdrv_drained_begin(blk_bs(blk));
1862     state->bs = blk_bs(blk);
1863 
1864     do_drive_backup(backup->device, backup->target,
1865                     backup->has_format, backup->format,
1866                     backup->sync,
1867                     backup->has_mode, backup->mode,
1868                     backup->has_speed, backup->speed,
1869                     backup->has_bitmap, backup->bitmap,
1870                     backup->has_on_source_error, backup->on_source_error,
1871                     backup->has_on_target_error, backup->on_target_error,
1872                     common->block_job_txn, &local_err);
1873     if (local_err) {
1874         error_propagate(errp, local_err);
1875         return;
1876     }
1877 
1878     state->job = state->bs->job;
1879 }
1880 
1881 static void drive_backup_abort(BlkActionState *common)
1882 {
1883     DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1884     BlockDriverState *bs = state->bs;
1885 
1886     /* Only cancel if it's the job we started */
1887     if (bs && bs->job && bs->job == state->job) {
1888         block_job_cancel_sync(bs->job);
1889     }
1890 }
1891 
1892 static void drive_backup_clean(BlkActionState *common)
1893 {
1894     DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1895 
1896     if (state->aio_context) {
1897         bdrv_drained_end(state->bs);
1898         aio_context_release(state->aio_context);
1899     }
1900 }
1901 
1902 typedef struct BlockdevBackupState {
1903     BlkActionState common;
1904     BlockDriverState *bs;
1905     BlockJob *job;
1906     AioContext *aio_context;
1907 } BlockdevBackupState;
1908 
1909 static void do_blockdev_backup(const char *device, const char *target,
1910                                enum MirrorSyncMode sync,
1911                                bool has_speed, int64_t speed,
1912                                bool has_on_source_error,
1913                                BlockdevOnError on_source_error,
1914                                bool has_on_target_error,
1915                                BlockdevOnError on_target_error,
1916                                BlockJobTxn *txn, Error **errp);
1917 
1918 static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
1919 {
1920     BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1921     BlockdevBackup *backup;
1922     BlockBackend *blk, *target;
1923     Error *local_err = NULL;
1924 
1925     assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
1926     backup = common->action->u.blockdev_backup;
1927 
1928     blk = blk_by_name(backup->device);
1929     if (!blk) {
1930         error_setg(errp, "Device '%s' not found", backup->device);
1931         return;
1932     }
1933 
1934     if (!blk_is_available(blk)) {
1935         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
1936         return;
1937     }
1938 
1939     target = blk_by_name(backup->target);
1940     if (!target) {
1941         error_setg(errp, "Device '%s' not found", backup->target);
1942         return;
1943     }
1944 
1945     /* AioContext is released in .clean() */
1946     state->aio_context = blk_get_aio_context(blk);
1947     if (state->aio_context != blk_get_aio_context(target)) {
1948         state->aio_context = NULL;
1949         error_setg(errp, "Backup between two IO threads is not implemented");
1950         return;
1951     }
1952     aio_context_acquire(state->aio_context);
1953     state->bs = blk_bs(blk);
1954     bdrv_drained_begin(state->bs);
1955 
1956     do_blockdev_backup(backup->device, backup->target,
1957                        backup->sync,
1958                        backup->has_speed, backup->speed,
1959                        backup->has_on_source_error, backup->on_source_error,
1960                        backup->has_on_target_error, backup->on_target_error,
1961                        common->block_job_txn, &local_err);
1962     if (local_err) {
1963         error_propagate(errp, local_err);
1964         return;
1965     }
1966 
1967     state->job = state->bs->job;
1968 }
1969 
1970 static void blockdev_backup_abort(BlkActionState *common)
1971 {
1972     BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1973     BlockDriverState *bs = state->bs;
1974 
1975     /* Only cancel if it's the job we started */
1976     if (bs && bs->job && bs->job == state->job) {
1977         block_job_cancel_sync(bs->job);
1978     }
1979 }
1980 
1981 static void blockdev_backup_clean(BlkActionState *common)
1982 {
1983     BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1984 
1985     if (state->aio_context) {
1986         bdrv_drained_end(state->bs);
1987         aio_context_release(state->aio_context);
1988     }
1989 }
1990 
1991 typedef struct BlockDirtyBitmapState {
1992     BlkActionState common;
1993     BdrvDirtyBitmap *bitmap;
1994     BlockDriverState *bs;
1995     AioContext *aio_context;
1996     HBitmap *backup;
1997     bool prepared;
1998 } BlockDirtyBitmapState;
1999 
2000 static void block_dirty_bitmap_add_prepare(BlkActionState *common,
2001                                            Error **errp)
2002 {
2003     Error *local_err = NULL;
2004     BlockDirtyBitmapAdd *action;
2005     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2006                                              common, common);
2007 
2008     if (action_check_completion_mode(common, errp) < 0) {
2009         return;
2010     }
2011 
2012     action = common->action->u.block_dirty_bitmap_add;
2013     /* AIO context taken and released within qmp_block_dirty_bitmap_add */
2014     qmp_block_dirty_bitmap_add(action->node, action->name,
2015                                action->has_granularity, action->granularity,
2016                                &local_err);
2017 
2018     if (!local_err) {
2019         state->prepared = true;
2020     } else {
2021         error_propagate(errp, local_err);
2022     }
2023 }
2024 
2025 static void block_dirty_bitmap_add_abort(BlkActionState *common)
2026 {
2027     BlockDirtyBitmapAdd *action;
2028     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2029                                              common, common);
2030 
2031     action = common->action->u.block_dirty_bitmap_add;
2032     /* Should not be able to fail: IF the bitmap was added via .prepare(),
2033      * then the node reference and bitmap name must have been valid.
2034      */
2035     if (state->prepared) {
2036         qmp_block_dirty_bitmap_remove(action->node, action->name, &error_abort);
2037     }
2038 }
2039 
2040 static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
2041                                              Error **errp)
2042 {
2043     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2044                                              common, common);
2045     BlockDirtyBitmap *action;
2046 
2047     if (action_check_completion_mode(common, errp) < 0) {
2048         return;
2049     }
2050 
2051     action = common->action->u.block_dirty_bitmap_clear;
2052     state->bitmap = block_dirty_bitmap_lookup(action->node,
2053                                               action->name,
2054                                               &state->bs,
2055                                               &state->aio_context,
2056                                               errp);
2057     if (!state->bitmap) {
2058         return;
2059     }
2060 
2061     if (bdrv_dirty_bitmap_frozen(state->bitmap)) {
2062         error_setg(errp, "Cannot modify a frozen bitmap");
2063         return;
2064     } else if (!bdrv_dirty_bitmap_enabled(state->bitmap)) {
2065         error_setg(errp, "Cannot clear a disabled bitmap");
2066         return;
2067     }
2068 
2069     bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
2070     /* AioContext is released in .clean() */
2071 }
2072 
2073 static void block_dirty_bitmap_clear_abort(BlkActionState *common)
2074 {
2075     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2076                                              common, common);
2077 
2078     bdrv_undo_clear_dirty_bitmap(state->bitmap, state->backup);
2079 }
2080 
2081 static void block_dirty_bitmap_clear_commit(BlkActionState *common)
2082 {
2083     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2084                                              common, common);
2085 
2086     hbitmap_free(state->backup);
2087 }
2088 
2089 static void block_dirty_bitmap_clear_clean(BlkActionState *common)
2090 {
2091     BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2092                                              common, common);
2093 
2094     if (state->aio_context) {
2095         aio_context_release(state->aio_context);
2096     }
2097 }
2098 
2099 static void abort_prepare(BlkActionState *common, Error **errp)
2100 {
2101     error_setg(errp, "Transaction aborted using Abort action");
2102 }
2103 
2104 static void abort_commit(BlkActionState *common)
2105 {
2106     g_assert_not_reached(); /* this action never succeeds */
2107 }
2108 
2109 static const BlkActionOps actions[] = {
2110     [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = {
2111         .instance_size = sizeof(ExternalSnapshotState),
2112         .prepare  = external_snapshot_prepare,
2113         .commit   = external_snapshot_commit,
2114         .abort = external_snapshot_abort,
2115         .clean = external_snapshot_clean,
2116     },
2117     [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = {
2118         .instance_size = sizeof(ExternalSnapshotState),
2119         .prepare  = external_snapshot_prepare,
2120         .commit   = external_snapshot_commit,
2121         .abort = external_snapshot_abort,
2122         .clean = external_snapshot_clean,
2123     },
2124     [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = {
2125         .instance_size = sizeof(DriveBackupState),
2126         .prepare = drive_backup_prepare,
2127         .abort = drive_backup_abort,
2128         .clean = drive_backup_clean,
2129     },
2130     [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = {
2131         .instance_size = sizeof(BlockdevBackupState),
2132         .prepare = blockdev_backup_prepare,
2133         .abort = blockdev_backup_abort,
2134         .clean = blockdev_backup_clean,
2135     },
2136     [TRANSACTION_ACTION_KIND_ABORT] = {
2137         .instance_size = sizeof(BlkActionState),
2138         .prepare = abort_prepare,
2139         .commit = abort_commit,
2140     },
2141     [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = {
2142         .instance_size = sizeof(InternalSnapshotState),
2143         .prepare  = internal_snapshot_prepare,
2144         .abort = internal_snapshot_abort,
2145         .clean = internal_snapshot_clean,
2146     },
2147     [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD] = {
2148         .instance_size = sizeof(BlockDirtyBitmapState),
2149         .prepare = block_dirty_bitmap_add_prepare,
2150         .abort = block_dirty_bitmap_add_abort,
2151     },
2152     [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR] = {
2153         .instance_size = sizeof(BlockDirtyBitmapState),
2154         .prepare = block_dirty_bitmap_clear_prepare,
2155         .commit = block_dirty_bitmap_clear_commit,
2156         .abort = block_dirty_bitmap_clear_abort,
2157         .clean = block_dirty_bitmap_clear_clean,
2158     }
2159 };
2160 
2161 /**
2162  * Allocate a TransactionProperties structure if necessary, and fill
2163  * that structure with desired defaults if they are unset.
2164  */
2165 static TransactionProperties *get_transaction_properties(
2166     TransactionProperties *props)
2167 {
2168     if (!props) {
2169         props = g_new0(TransactionProperties, 1);
2170     }
2171 
2172     if (!props->has_completion_mode) {
2173         props->has_completion_mode = true;
2174         props->completion_mode = ACTION_COMPLETION_MODE_INDIVIDUAL;
2175     }
2176 
2177     return props;
2178 }
2179 
2180 /*
2181  * 'Atomic' group operations.  The operations are performed as a set, and if
2182  * any fail then we roll back all operations in the group.
2183  */
2184 void qmp_transaction(TransactionActionList *dev_list,
2185                      bool has_props,
2186                      struct TransactionProperties *props,
2187                      Error **errp)
2188 {
2189     TransactionActionList *dev_entry = dev_list;
2190     BlockJobTxn *block_job_txn = NULL;
2191     BlkActionState *state, *next;
2192     Error *local_err = NULL;
2193 
2194     QSIMPLEQ_HEAD(snap_bdrv_states, BlkActionState) snap_bdrv_states;
2195     QSIMPLEQ_INIT(&snap_bdrv_states);
2196 
2197     /* Does this transaction get canceled as a group on failure?
2198      * If not, we don't really need to make a BlockJobTxn.
2199      */
2200     props = get_transaction_properties(props);
2201     if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
2202         block_job_txn = block_job_txn_new();
2203     }
2204 
2205     /* drain all i/o before any operations */
2206     bdrv_drain_all();
2207 
2208     /* We don't do anything in this loop that commits us to the operations */
2209     while (NULL != dev_entry) {
2210         TransactionAction *dev_info = NULL;
2211         const BlkActionOps *ops;
2212 
2213         dev_info = dev_entry->value;
2214         dev_entry = dev_entry->next;
2215 
2216         assert(dev_info->type < ARRAY_SIZE(actions));
2217 
2218         ops = &actions[dev_info->type];
2219         assert(ops->instance_size > 0);
2220 
2221         state = g_malloc0(ops->instance_size);
2222         state->ops = ops;
2223         state->action = dev_info;
2224         state->block_job_txn = block_job_txn;
2225         state->txn_props = props;
2226         QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, state, entry);
2227 
2228         state->ops->prepare(state, &local_err);
2229         if (local_err) {
2230             error_propagate(errp, local_err);
2231             goto delete_and_fail;
2232         }
2233     }
2234 
2235     QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
2236         if (state->ops->commit) {
2237             state->ops->commit(state);
2238         }
2239     }
2240 
2241     /* success */
2242     goto exit;
2243 
2244 delete_and_fail:
2245     /* failure, and it is all-or-none; roll back all operations */
2246     QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
2247         if (state->ops->abort) {
2248             state->ops->abort(state);
2249         }
2250     }
2251 exit:
2252     QSIMPLEQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) {
2253         if (state->ops->clean) {
2254             state->ops->clean(state);
2255         }
2256         g_free(state);
2257     }
2258     if (!has_props) {
2259         qapi_free_TransactionProperties(props);
2260     }
2261     block_job_txn_unref(block_job_txn);
2262 }
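
/*
 * Illustrative QMP usage (not part of the original source; device names and
 * target paths are assumptions): qmp_transaction() backs the 'transaction'
 * command.  Backing up two drives so that both jobs are cancelled together
 * if either fails might look like:
 *
 *   { "execute": "transaction",
 *     "arguments": {
 *       "actions": [
 *         { "type": "drive-backup",
 *           "data": { "device": "drive0", "target": "/tmp/drive0.bak",
 *                     "sync": "full" } },
 *         { "type": "drive-backup",
 *           "data": { "device": "drive1", "target": "/tmp/drive1.bak",
 *                     "sync": "full" } }
 *       ],
 *       "properties": { "completion-mode": "grouped" }
 *     } }
 */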
2263 
2264 void qmp_eject(const char *device, bool has_force, bool force, Error **errp)
2265 {
2266     Error *local_err = NULL;
2267 
2268     qmp_blockdev_open_tray(device, has_force, force, &local_err);
2269     if (local_err) {
2270         error_propagate(errp, local_err);
2271         return;
2272     }
2273 
2274     qmp_x_blockdev_remove_medium(device, errp);
2275 }
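
/*
 * Illustrative QMP usage (not part of the original source; the device name
 * is an assumption): 'eject' opens the tray and removes the medium in one
 * step, e.g.
 *
 *   { "execute": "eject",
 *     "arguments": { "device": "ide1-cd0", "force": false } }
 */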
2276 
2277 void qmp_block_passwd(bool has_device, const char *device,
2278                       bool has_node_name, const char *node_name,
2279                       const char *password, Error **errp)
2280 {
2281     Error *local_err = NULL;
2282     BlockDriverState *bs;
2283     AioContext *aio_context;
2284 
2285     bs = bdrv_lookup_bs(has_device ? device : NULL,
2286                         has_node_name ? node_name : NULL,
2287                         &local_err);
2288     if (local_err) {
2289         error_propagate(errp, local_err);
2290         return;
2291     }
2292 
2293     aio_context = bdrv_get_aio_context(bs);
2294     aio_context_acquire(aio_context);
2295 
2296     bdrv_add_key(bs, password, errp);
2297 
2298     aio_context_release(aio_context);
2299 }
2300 
2301 void qmp_blockdev_open_tray(const char *device, bool has_force, bool force,
2302                             Error **errp)
2303 {
2304     BlockBackend *blk;
2305     bool locked;
2306 
2307     if (!has_force) {
2308         force = false;
2309     }
2310 
2311     blk = blk_by_name(device);
2312     if (!blk) {
2313         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2314                   "Device '%s' not found", device);
2315         return;
2316     }
2317 
2318     if (!blk_dev_has_removable_media(blk)) {
2319         error_setg(errp, "Device '%s' is not removable", device);
2320         return;
2321     }
2322 
2323     if (!blk_dev_has_tray(blk)) {
2324         /* Ignore this command on tray-less devices */
2325         return;
2326     }
2327 
2328     if (blk_dev_is_tray_open(blk)) {
2329         return;
2330     }
2331 
2332     locked = blk_dev_is_medium_locked(blk);
2333     if (locked) {
2334         blk_dev_eject_request(blk, force);
2335     }
2336 
2337     if (!locked || force) {
2338         blk_dev_change_media_cb(blk, false);
2339     }
2340 }
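
/*
 * Illustrative QMP usage (not part of the original source; the device name
 * is an assumption):
 *
 *   { "execute": "blockdev-open-tray",
 *     "arguments": { "device": "ide1-cd0", "force": false } }
 *
 * If the guest has locked the medium and "force" is false, only an eject
 * request is sent to the guest; the tray is not forcibly opened.
 */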
2341 
2342 void qmp_blockdev_close_tray(const char *device, Error **errp)
2343 {
2344     BlockBackend *blk;
2345 
2346     blk = blk_by_name(device);
2347     if (!blk) {
2348         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2349                   "Device '%s' not found", device);
2350         return;
2351     }
2352 
2353     if (!blk_dev_has_removable_media(blk)) {
2354         error_setg(errp, "Device '%s' is not removable", device);
2355         return;
2356     }
2357 
2358     if (!blk_dev_has_tray(blk)) {
2359         /* Ignore this command on tray-less devices */
2360         return;
2361     }
2362 
2363     if (!blk_dev_is_tray_open(blk)) {
2364         return;
2365     }
2366 
2367     blk_dev_change_media_cb(blk, true);
2368 }
2369 
2370 void qmp_x_blockdev_remove_medium(const char *device, Error **errp)
2371 {
2372     BlockBackend *blk;
2373     BlockDriverState *bs;
2374     AioContext *aio_context;
2375     bool has_device;
2376 
2377     blk = blk_by_name(device);
2378     if (!blk) {
2379         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2380                   "Device '%s' not found", device);
2381         return;
2382     }
2383 
2384     /* For BBs without a device, we can exchange the BDS tree at will */
2385     has_device = blk_get_attached_dev(blk);
2386 
2387     if (has_device && !blk_dev_has_removable_media(blk)) {
2388         error_setg(errp, "Device '%s' is not removable", device);
2389         return;
2390     }
2391 
2392     if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
2393         error_setg(errp, "Tray of device '%s' is not open", device);
2394         return;
2395     }
2396 
2397     bs = blk_bs(blk);
2398     if (!bs) {
2399         return;
2400     }
2401 
2402     aio_context = bdrv_get_aio_context(bs);
2403     aio_context_acquire(aio_context);
2404 
2405     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
2406         goto out;
2407     }
2408 
2409     /* This follows the convention established by bdrv_make_anon() */
2410     if (bs->device_list.tqe_prev) {
2411         bdrv_device_remove(bs);
2412     }
2413 
2414     blk_remove_bs(blk);
2415 
2416     if (!blk_dev_has_tray(blk)) {
2417         /* For tray-less devices, blockdev-open-tray is a no-op (or may not be
2418          * called at all); therefore, the medium needs to be ejected here.
2419          * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load
2420          * value passed here (i.e. false). */
2421         blk_dev_change_media_cb(blk, false);
2422     }
2423 
2424 out:
2425     aio_context_release(aio_context);
2426 }
2427 
2428 static void qmp_blockdev_insert_anon_medium(const char *device,
2429                                             BlockDriverState *bs, Error **errp)
2430 {
2431     BlockBackend *blk;
2432     bool has_device;
2433 
2434     blk = blk_by_name(device);
2435     if (!blk) {
2436         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2437                   "Device '%s' not found", device);
2438         return;
2439     }
2440 
2441     /* For BBs without a device, we can exchange the BDS tree at will */
2442     has_device = blk_get_attached_dev(blk);
2443 
2444     if (has_device && !blk_dev_has_removable_media(blk)) {
2445         error_setg(errp, "Device '%s' is not removable", device);
2446         return;
2447     }
2448 
2449     if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
2450         error_setg(errp, "Tray of device '%s' is not open", device);
2451         return;
2452     }
2453 
2454     if (blk_bs(blk)) {
2455         error_setg(errp, "There already is a medium in device '%s'", device);
2456         return;
2457     }
2458 
2459     blk_insert_bs(blk, bs);
2460 
2461     QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
2462 
2463     if (!blk_dev_has_tray(blk)) {
2464         /* For tray-less devices, blockdev-close-tray is a no-op (or may not be
2465          * called at all); therefore, the medium needs to be pushed into the
2466          * slot here.
2467          * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load
2468          * value passed here (i.e. true). */
2469         blk_dev_change_media_cb(blk, true);
2470     }
2471 }
2472 
2473 void qmp_x_blockdev_insert_medium(const char *device, const char *node_name,
2474                                   Error **errp)
2475 {
2476     BlockDriverState *bs;
2477 
2478     bs = bdrv_find_node(node_name);
2479     if (!bs) {
2480         error_setg(errp, "Node '%s' not found", node_name);
2481         return;
2482     }
2483 
2484     if (bs->blk) {
2485         error_setg(errp, "Node '%s' is already in use by '%s'", node_name,
2486                    blk_name(bs->blk));
2487         return;
2488     }
2489 
2490     qmp_blockdev_insert_anon_medium(device, bs, errp);
2491 }
2492 
2493 void qmp_blockdev_change_medium(const char *device, const char *filename,
2494                                 bool has_format, const char *format,
2495                                 bool has_read_only,
2496                                 BlockdevChangeReadOnlyMode read_only,
2497                                 Error **errp)
2498 {
2499     BlockBackend *blk;
2500     BlockDriverState *medium_bs = NULL;
2501     int bdrv_flags, ret;
2502     QDict *options = NULL;
2503     Error *err = NULL;
2504 
2505     blk = blk_by_name(device);
2506     if (!blk) {
2507         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2508                   "Device '%s' not found", device);
2509         goto fail;
2510     }
2511 
2512     if (blk_bs(blk)) {
2513         blk_update_root_state(blk);
2514     }
2515 
2516     bdrv_flags = blk_get_open_flags_from_root_state(blk);
2517 
2518     if (!has_read_only) {
2519         read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN;
2520     }
2521 
2522     switch (read_only) {
2523     case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN:
2524         break;
2525 
2526     case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY:
2527         bdrv_flags &= ~BDRV_O_RDWR;
2528         break;
2529 
2530     case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE:
2531         bdrv_flags |= BDRV_O_RDWR;
2532         break;
2533 
2534     default:
2535         abort();
2536     }
2537 
2538     if (has_format) {
2539         options = qdict_new();
2540         qdict_put(options, "driver", qstring_from_str(format));
2541     }
2542 
2543     assert(!medium_bs);
2544     ret = bdrv_open(&medium_bs, filename, NULL, options, bdrv_flags, errp);
2545     if (ret < 0) {
2546         goto fail;
2547     }
2548 
2549     blk_apply_root_state(blk, medium_bs);
2550 
2551     bdrv_add_key(medium_bs, NULL, &err);
2552     if (err) {
2553         error_propagate(errp, err);
2554         goto fail;
2555     }
2556 
2557     qmp_blockdev_open_tray(device, false, false, &err);
2558     if (err) {
2559         error_propagate(errp, err);
2560         goto fail;
2561     }
2562 
2563     qmp_x_blockdev_remove_medium(device, &err);
2564     if (err) {
2565         error_propagate(errp, err);
2566         goto fail;
2567     }
2568 
2569     qmp_blockdev_insert_anon_medium(device, medium_bs, &err);
2570     if (err) {
2571         error_propagate(errp, err);
2572         goto fail;
2573     }
2574 
2575     qmp_blockdev_close_tray(device, errp);
2576 
2577 fail:
2578     /* If the medium has been inserted, the device has its own reference, so
2579      * ours must be relinquished; and if it has not been inserted successfully,
2580      * the reference must be relinquished anyway */
2581     bdrv_unref(medium_bs);
2582 }
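
/*
 * Illustrative QMP usage (not part of the original source; device name,
 * path and format are assumptions): 'blockdev-change-medium' chains
 * open-tray, remove-medium, insert-medium and close-tray into one command,
 * e.g.
 *
 *   { "execute": "blockdev-change-medium",
 *     "arguments": { "device": "ide1-cd0",
 *                    "filename": "/srv/images/fedora.iso",
 *                    "format": "raw" } }
 */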
2583 
2584 /* throttling disk I/O limits */
2585 void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
2586                                int64_t bps_wr,
2587                                int64_t iops,
2588                                int64_t iops_rd,
2589                                int64_t iops_wr,
2590                                bool has_bps_max,
2591                                int64_t bps_max,
2592                                bool has_bps_rd_max,
2593                                int64_t bps_rd_max,
2594                                bool has_bps_wr_max,
2595                                int64_t bps_wr_max,
2596                                bool has_iops_max,
2597                                int64_t iops_max,
2598                                bool has_iops_rd_max,
2599                                int64_t iops_rd_max,
2600                                bool has_iops_wr_max,
2601                                int64_t iops_wr_max,
2602                                bool has_iops_size,
2603                                int64_t iops_size,
2604                                bool has_group,
2605                                const char *group, Error **errp)
2606 {
2607     ThrottleConfig cfg;
2608     BlockDriverState *bs;
2609     BlockBackend *blk;
2610     AioContext *aio_context;
2611 
2612     blk = blk_by_name(device);
2613     if (!blk) {
2614         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2615                   "Device '%s' not found", device);
2616         return;
2617     }
2618 
2619     aio_context = blk_get_aio_context(blk);
2620     aio_context_acquire(aio_context);
2621 
2622     bs = blk_bs(blk);
2623     if (!bs) {
2624         error_setg(errp, "Device '%s' has no medium", device);
2625         goto out;
2626     }
2627 
2628     memset(&cfg, 0, sizeof(cfg));
2629     cfg.buckets[THROTTLE_BPS_TOTAL].avg = bps;
2630     cfg.buckets[THROTTLE_BPS_READ].avg  = bps_rd;
2631     cfg.buckets[THROTTLE_BPS_WRITE].avg = bps_wr;
2632 
2633     cfg.buckets[THROTTLE_OPS_TOTAL].avg = iops;
2634     cfg.buckets[THROTTLE_OPS_READ].avg  = iops_rd;
2635     cfg.buckets[THROTTLE_OPS_WRITE].avg = iops_wr;
2636 
2637     if (has_bps_max) {
2638         cfg.buckets[THROTTLE_BPS_TOTAL].max = bps_max;
2639     }
2640     if (has_bps_rd_max) {
2641         cfg.buckets[THROTTLE_BPS_READ].max = bps_rd_max;
2642     }
2643     if (has_bps_wr_max) {
2644         cfg.buckets[THROTTLE_BPS_WRITE].max = bps_wr_max;
2645     }
2646     if (has_iops_max) {
2647         cfg.buckets[THROTTLE_OPS_TOTAL].max = iops_max;
2648     }
2649     if (has_iops_rd_max) {
2650         cfg.buckets[THROTTLE_OPS_READ].max = iops_rd_max;
2651     }
2652     if (has_iops_wr_max) {
2653         cfg.buckets[THROTTLE_OPS_WRITE].max = iops_wr_max;
2654     }
2655 
2656     if (has_iops_size) {
2657         cfg.op_size = iops_size;
2658     }
2659 
2660     if (!check_throttle_config(&cfg, errp)) {
2661         goto out;
2662     }
2663 
2664     if (throttle_enabled(&cfg)) {
2665         /* Enable I/O limits if they're not enabled yet, otherwise
2666          * just update the throttling group. */
2667         if (!bs->throttle_state) {
2668             bdrv_io_limits_enable(bs, has_group ? group : device);
2669         } else if (has_group) {
2670             bdrv_io_limits_update_group(bs, group);
2671         }
2672         /* Set the new throttling configuration */
2673         bdrv_set_io_limits(bs, &cfg);
2674     } else if (bs->throttle_state) {
2675         /* If all throttling settings are set to 0, disable I/O limits */
2676         bdrv_io_limits_disable(bs);
2677     }
2678 
2679 out:
2680     aio_context_release(aio_context);
2681 }
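
/*
 * Illustrative QMP usage (not part of the original source; device name and
 * limits are assumptions): cap a drive at roughly 10 MB/s and 100 IOPS,
 * leaving the per-direction buckets unlimited (0 means "no limit"):
 *
 *   { "execute": "block_set_io_throttle",
 *     "arguments": { "device": "drive0",
 *                    "bps": 10485760, "bps_rd": 0, "bps_wr": 0,
 *                    "iops": 100, "iops_rd": 0, "iops_wr": 0 } }
 */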
2682 
2683 void qmp_block_dirty_bitmap_add(const char *node, const char *name,
2684                                 bool has_granularity, uint32_t granularity,
2685                                 Error **errp)
2686 {
2687     AioContext *aio_context;
2688     BlockDriverState *bs;
2689 
2690     if (!name || name[0] == '\0') {
2691         error_setg(errp, "Bitmap name cannot be empty");
2692         return;
2693     }
2694 
2695     bs = bdrv_lookup_bs(node, node, errp);
2696     if (!bs) {
2697         return;
2698     }
2699 
2700     aio_context = bdrv_get_aio_context(bs);
2701     aio_context_acquire(aio_context);
2702 
2703     if (has_granularity) {
2704         if (granularity < 512 || !is_power_of_2(granularity)) {
2705             error_setg(errp, "Granularity must be power of 2 "
2706                              "and at least 512");
2707             goto out;
2708         }
2709     } else {
2710         /* Default to cluster size, if available: */
2711         granularity = bdrv_get_default_bitmap_granularity(bs);
2712     }
2713 
2714     bdrv_create_dirty_bitmap(bs, granularity, name, errp);
2715 
2716  out:
2717     aio_context_release(aio_context);
2718 }
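
/*
 * Illustrative QMP usage (not part of the original source; node and bitmap
 * names are assumptions): create a dirty bitmap with 64 KiB granularity,
 * e.g. for later incremental backups:
 *
 *   { "execute": "block-dirty-bitmap-add",
 *     "arguments": { "node": "drive0", "name": "bitmap0",
 *                    "granularity": 65536 } }
 */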
2719 
2720 void qmp_block_dirty_bitmap_remove(const char *node, const char *name,
2721                                    Error **errp)
2722 {
2723     AioContext *aio_context;
2724     BlockDriverState *bs;
2725     BdrvDirtyBitmap *bitmap;
2726 
2727     bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp);
2728     if (!bitmap || !bs) {
2729         return;
2730     }
2731 
2732     if (bdrv_dirty_bitmap_frozen(bitmap)) {
2733         error_setg(errp,
2734                    "Bitmap '%s' is currently frozen and cannot be removed",
2735                    name);
2736         goto out;
2737     }
2738     bdrv_dirty_bitmap_make_anon(bitmap);
2739     bdrv_release_dirty_bitmap(bs, bitmap);
2740 
2741  out:
2742     aio_context_release(aio_context);
2743 }
2744 
2745 /**
2746  * Completely clear a bitmap, for the purposes of synchronizing a bitmap
2747  * immediately after a full backup operation.
2748  */
2749 void qmp_block_dirty_bitmap_clear(const char *node, const char *name,
2750                                   Error **errp)
2751 {
2752     AioContext *aio_context;
2753     BdrvDirtyBitmap *bitmap;
2754     BlockDriverState *bs;
2755 
2756     bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp);
2757     if (!bitmap || !bs) {
2758         return;
2759     }
2760 
2761     if (bdrv_dirty_bitmap_frozen(bitmap)) {
2762         error_setg(errp,
2763                    "Bitmap '%s' is currently frozen and cannot be modified",
2764                    name);
2765         goto out;
2766     } else if (!bdrv_dirty_bitmap_enabled(bitmap)) {
2767         error_setg(errp,
2768                    "Bitmap '%s' is currently disabled and cannot be cleared",
2769                    name);
2770         goto out;
2771     }
2772 
2773     bdrv_clear_dirty_bitmap(bitmap, NULL);
2774 
2775  out:
2776     aio_context_release(aio_context);
2777 }
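
/*
 * Illustrative QMP usage (not part of the original source; names are
 * assumptions): reset a bitmap right after a full backup so it only tracks
 * writes made from that point on:
 *
 *   { "execute": "block-dirty-bitmap-clear",
 *     "arguments": { "node": "drive0", "name": "bitmap0" } }
 */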
2778 
2779 void hmp_drive_del(Monitor *mon, const QDict *qdict)
2780 {
2781     const char *id = qdict_get_str(qdict, "id");
2782     BlockBackend *blk;
2783     BlockDriverState *bs;
2784     AioContext *aio_context;
2785     Error *local_err = NULL;
2786 
2787     blk = blk_by_name(id);
2788     if (!blk) {
2789         error_report("Device '%s' not found", id);
2790         return;
2791     }
2792 
2793     if (!blk_legacy_dinfo(blk)) {
2794         error_report("Deleting device added with blockdev-add"
2795                      " is not supported");
2796         return;
2797     }
2798 
2799     aio_context = blk_get_aio_context(blk);
2800     aio_context_acquire(aio_context);
2801 
2802     bs = blk_bs(blk);
2803     if (bs) {
2804         if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) {
2805             error_report_err(local_err);
2806             aio_context_release(aio_context);
2807             return;
2808         }
2809 
2810         blk_remove_bs(blk);
2811     }
2812 
2813     /* if we have a device attached to this BlockDriverState
2814      * then we need to make the drive anonymous until the device
2815      * can be removed.  If this is a drive with no device backing
2816      * then we can just get rid of the block driver state right here.
2817      */
2818     if (blk_get_attached_dev(blk)) {
2819         blk_hide_on_behalf_of_hmp_drive_del(blk);
2820         /* Further I/O must not pause the guest */
2821         blk_set_on_error(blk, BLOCKDEV_ON_ERROR_REPORT,
2822                          BLOCKDEV_ON_ERROR_REPORT);
2823     } else {
2824         blk_unref(blk);
2825     }
2826 
2827     aio_context_release(aio_context);
2828 }
2829 
2830 void qmp_block_resize(bool has_device, const char *device,
2831                       bool has_node_name, const char *node_name,
2832                       int64_t size, Error **errp)
2833 {
2834     Error *local_err = NULL;
2835     BlockDriverState *bs;
2836     AioContext *aio_context;
2837     int ret;
2838 
2839     bs = bdrv_lookup_bs(has_device ? device : NULL,
2840                         has_node_name ? node_name : NULL,
2841                         &local_err);
2842     if (local_err) {
2843         error_propagate(errp, local_err);
2844         return;
2845     }
2846 
2847     aio_context = bdrv_get_aio_context(bs);
2848     aio_context_acquire(aio_context);
2849 
2850     if (!bdrv_is_first_non_filter(bs)) {
2851         error_setg(errp, QERR_FEATURE_DISABLED, "resize");
2852         goto out;
2853     }
2854 
2855     if (size < 0) {
2856         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size");
2857         goto out;
2858     }
2859 
2860     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
2861         error_setg(errp, QERR_DEVICE_IN_USE, device);
2862         goto out;
2863     }
2864 
2865     /* complete all in-flight operations before resizing the device */
2866     bdrv_drain_all();
2867 
2868     ret = bdrv_truncate(bs, size);
2869     switch (ret) {
2870     case 0:
2871         break;
2872     case -ENOMEDIUM:
2873         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
2874         break;
2875     case -ENOTSUP:
2876         error_setg(errp, QERR_UNSUPPORTED);
2877         break;
2878     case -EACCES:
2879         error_setg(errp, "Device '%s' is read only", device);
2880         break;
2881     case -EBUSY:
2882         error_setg(errp, QERR_DEVICE_IN_USE, device);
2883         break;
2884     default:
2885         error_setg_errno(errp, -ret, "Could not resize");
2886         break;
2887     }
2888 
2889 out:
2890     aio_context_release(aio_context);
2891 }
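
/*
 * Illustrative QMP usage (not part of the original source; the device name
 * and size are assumptions): grow the image behind drive0 to 10 GiB (the
 * size is given in bytes):
 *
 *   { "execute": "block_resize",
 *     "arguments": { "device": "drive0", "size": 10737418240 } }
 */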
2892 
2893 static void block_job_cb(void *opaque, int ret)
2894 {
2895     /* Note that this function may be executed from another AioContext besides
2896      * the QEMU main loop.  If you need to access anything that assumes the
2897      * QEMU global mutex, use a BH or introduce a mutex.
2898      */
2899 
2900     BlockDriverState *bs = opaque;
2901     const char *msg = NULL;
2902 
2903     trace_block_job_cb(bs, bs->job, ret);
2904 
2905     assert(bs->job);
2906 
2907     if (ret < 0) {
2908         msg = strerror(-ret);
2909     }
2910 
2911     if (block_job_is_cancelled(bs->job)) {
2912         block_job_event_cancelled(bs->job);
2913     } else {
2914         block_job_event_completed(bs->job, msg);
2915     }
2916 }
2917 
2918 void qmp_block_stream(const char *device,
2919                       bool has_base, const char *base,
2920                       bool has_backing_file, const char *backing_file,
2921                       bool has_speed, int64_t speed,
2922                       bool has_on_error, BlockdevOnError on_error,
2923                       Error **errp)
2924 {
2925     BlockBackend *blk;
2926     BlockDriverState *bs;
2927     BlockDriverState *base_bs = NULL;
2928     AioContext *aio_context;
2929     Error *local_err = NULL;
2930     const char *base_name = NULL;
2931 
2932     if (!has_on_error) {
2933         on_error = BLOCKDEV_ON_ERROR_REPORT;
2934     }
2935 
2936     blk = blk_by_name(device);
2937     if (!blk) {
2938         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2939                   "Device '%s' not found", device);
2940         return;
2941     }
2942 
2943     aio_context = blk_get_aio_context(blk);
2944     aio_context_acquire(aio_context);
2945 
2946     if (!blk_is_available(blk)) {
2947         error_setg(errp, "Device '%s' has no medium", device);
2948         goto out;
2949     }
2950     bs = blk_bs(blk);
2951 
2952     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_STREAM, errp)) {
2953         goto out;
2954     }
2955 
2956     if (has_base) {
2957         base_bs = bdrv_find_backing_image(bs, base);
2958         if (base_bs == NULL) {
2959             error_setg(errp, QERR_BASE_NOT_FOUND, base);
2960             goto out;
2961         }
2962         assert(bdrv_get_aio_context(base_bs) == aio_context);
2963         base_name = base;
2964     }
2965 
2966     /* if we are streaming the entire chain, the result will have no backing
2967      * file, and specifying one is therefore an error */
2968     if (base_bs == NULL && has_backing_file) {
2969         error_setg(errp, "backing file specified, but streaming the "
2970                          "entire chain");
2971         goto out;
2972     }
2973 
2974     /* backing_file string overrides base bs filename */
2975     base_name = has_backing_file ? backing_file : base_name;
2976 
2977     stream_start(bs, base_bs, base_name, has_speed ? speed : 0,
2978                  on_error, block_job_cb, bs, &local_err);
2979     if (local_err) {
2980         error_propagate(errp, local_err);
2981         goto out;
2982     }
2983 
2984     trace_qmp_block_stream(bs, bs->job);
2985 
2986 out:
2987     aio_context_release(aio_context);
2988 }
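
/*
 * Illustrative QMP usage (not part of the original source; device name and
 * base path are assumptions): copy data from the backing chain into drive0's
 * active layer, stopping at (and keeping) the given base image:
 *
 *   { "execute": "block-stream",
 *     "arguments": { "device": "drive0",
 *                    "base": "/srv/images/base.qcow2" } }
 */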
2989 
2990 void qmp_block_commit(const char *device,
2991                       bool has_base, const char *base,
2992                       bool has_top, const char *top,
2993                       bool has_backing_file, const char *backing_file,
2994                       bool has_speed, int64_t speed,
2995                       Error **errp)
2996 {
2997     BlockBackend *blk;
2998     BlockDriverState *bs;
2999     BlockDriverState *base_bs, *top_bs;
3000     AioContext *aio_context;
3001     Error *local_err = NULL;
3002     /* This will be part of the QMP command, if/when the
3003      * BlockdevOnError change for blkmirror makes it in
3004      */
3005     BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT;
3006 
3007     if (!has_speed) {
3008         speed = 0;
3009     }
3010 
3011     /* Important Note:
3012      *  libvirt relies on the DeviceNotFound error class in order to probe for
3013      *  live commit feature versions; for this to work, we must make sure to
3014      *  perform the device lookup before any generic errors that may occur in a
3015      *  scenario in which all optional arguments are omitted. */
3016     blk = blk_by_name(device);
3017     if (!blk) {
3018         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3019                   "Device '%s' not found", device);
3020         return;
3021     }
3022 
3023     aio_context = blk_get_aio_context(blk);
3024     aio_context_acquire(aio_context);
3025 
3026     if (!blk_is_available(blk)) {
3027         error_setg(errp, "Device '%s' has no medium", device);
3028         goto out;
3029     }
3030     bs = blk_bs(blk);
3031 
3032     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) {
3033         goto out;
3034     }
3035 
3036     /* default top_bs is the active layer */
3037     top_bs = bs;
3038 
3039     if (has_top && top) {
3040         if (strcmp(bs->filename, top) != 0) {
3041             top_bs = bdrv_find_backing_image(bs, top);
3042         }
3043     }
3044 
3045     if (top_bs == NULL) {
3046         error_setg(errp, "Top image file %s not found", top ? top : "NULL");
3047         goto out;
3048     }
3049 
3050     assert(bdrv_get_aio_context(top_bs) == aio_context);
3051 
3052     if (has_base && base) {
3053         base_bs = bdrv_find_backing_image(top_bs, base);
3054     } else {
3055         base_bs = bdrv_find_base(top_bs);
3056     }
3057 
3058     if (base_bs == NULL) {
3059         error_setg(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL");
3060         goto out;
3061     }
3062 
3063     assert(bdrv_get_aio_context(base_bs) == aio_context);
3064 
3065     if (bdrv_op_is_blocked(base_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
3066         goto out;
3067     }
3068 
3069     /* Do not allow attempts to commit an image into itself */
3070     if (top_bs == base_bs) {
3071         error_setg(errp, "cannot commit an image into itself");
3072         goto out;
3073     }
3074 
3075     if (top_bs == bs) {
3076         if (has_backing_file) {
3077             error_setg(errp, "'backing-file' specified,"
3078                              " but 'top' is the active layer");
3079             goto out;
3080         }
3081         commit_active_start(bs, base_bs, speed, on_error, block_job_cb,
3082                             bs, &local_err);
3083     } else {
3084         commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs,
3085                      has_backing_file ? backing_file : NULL, &local_err);
3086     }
3087     if (local_err != NULL) {
3088         error_propagate(errp, local_err);
3089         goto out;
3090     }
3091 
3092 out:
3093     aio_context_release(aio_context);
3094 }
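
/*
 * Illustrative QMP usage (not part of the original source; device name and
 * file paths are assumptions): commit the data held in an intermediate
 * snapshot down into its backing file:
 *
 *   { "execute": "block-commit",
 *     "arguments": { "device": "drive0",
 *                    "top": "/srv/images/snap1.qcow2",
 *                    "base": "/srv/images/base.qcow2" } }
 */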
3095 
3096 static void do_drive_backup(const char *device, const char *target,
3097                             bool has_format, const char *format,
3098                             enum MirrorSyncMode sync,
3099                             bool has_mode, enum NewImageMode mode,
3100                             bool has_speed, int64_t speed,
3101                             bool has_bitmap, const char *bitmap,
3102                             bool has_on_source_error,
3103                             BlockdevOnError on_source_error,
3104                             bool has_on_target_error,
3105                             BlockdevOnError on_target_error,
3106                             BlockJobTxn *txn, Error **errp)
3107 {
3108     BlockBackend *blk;
3109     BlockDriverState *bs;
3110     BlockDriverState *target_bs;
3111     BlockDriverState *source = NULL;
3112     BdrvDirtyBitmap *bmap = NULL;
3113     AioContext *aio_context;
3114     QDict *options = NULL;
3115     Error *local_err = NULL;
3116     int flags;
3117     int64_t size;
3118     int ret;
3119 
3120     if (!has_speed) {
3121         speed = 0;
3122     }
3123     if (!has_on_source_error) {
3124         on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3125     }
3126     if (!has_on_target_error) {
3127         on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3128     }
3129     if (!has_mode) {
3130         mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
3131     }
3132 
3133     blk = blk_by_name(device);
3134     if (!blk) {
3135         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3136                   "Device '%s' not found", device);
3137         return;
3138     }
3139 
3140     aio_context = blk_get_aio_context(blk);
3141     aio_context_acquire(aio_context);
3142 
3143     /* Although backup_run has this check too, we need to use bs->drv below, so
3144      * do an early check redundantly. */
3145     if (!blk_is_available(blk)) {
3146         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
3147         goto out;
3148     }
3149     bs = blk_bs(blk);
3150 
3151     if (!has_format) {
3152         format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name;
3153     }
3154 
3155     /* Early check to avoid creating target */
3156     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
3157         goto out;
3158     }
3159 
3160     flags = bs->open_flags | BDRV_O_RDWR;
3161 
3162     /* See if we have a backing HD we can use to create our new image
3163      * on top of. */
3164     if (sync == MIRROR_SYNC_MODE_TOP) {
3165         source = backing_bs(bs);
3166         if (!source) {
3167             sync = MIRROR_SYNC_MODE_FULL;
3168         }
3169     }
3170     if (sync == MIRROR_SYNC_MODE_NONE) {
3171         source = bs;
3172     }
3173 
3174     size = bdrv_getlength(bs);
3175     if (size < 0) {
3176         error_setg_errno(errp, -size, "bdrv_getlength failed");
3177         goto out;
3178     }
3179 
3180     if (mode != NEW_IMAGE_MODE_EXISTING) {
3181         assert(format);
3182         if (source) {
3183             bdrv_img_create(target, format, source->filename,
3184                             source->drv->format_name, NULL,
3185                             size, flags, &local_err, false);
3186         } else {
3187             bdrv_img_create(target, format, NULL, NULL, NULL,
3188                             size, flags, &local_err, false);
3189         }
3190     }
3191 
3192     if (local_err) {
3193         error_propagate(errp, local_err);
3194         goto out;
3195     }
3196 
3197     if (format) {
3198         options = qdict_new();
3199         qdict_put(options, "driver", qstring_from_str(format));
3200     }
3201 
3202     target_bs = NULL;
3203     ret = bdrv_open(&target_bs, target, NULL, options, flags, &local_err);
3204     if (ret < 0) {
3205         error_propagate(errp, local_err);
3206         goto out;
3207     }
3208 
3209     bdrv_set_aio_context(target_bs, aio_context);
3210 
3211     if (has_bitmap) {
3212         bmap = bdrv_find_dirty_bitmap(bs, bitmap);
3213         if (!bmap) {
3214             error_setg(errp, "Bitmap '%s' could not be found", bitmap);
3215             bdrv_unref(target_bs);
3216             goto out;
3217         }
3218     }
3219 
3220     backup_start(bs, target_bs, speed, sync, bmap,
3221                  on_source_error, on_target_error,
3222                  block_job_cb, bs, txn, &local_err);
3223     if (local_err != NULL) {
3224         bdrv_unref(target_bs);
3225         error_propagate(errp, local_err);
3226         goto out;
3227     }
3228 
3229 out:
3230     aio_context_release(aio_context);
3231 }
3232 
3233 void qmp_drive_backup(const char *device, const char *target,
3234                       bool has_format, const char *format,
3235                       enum MirrorSyncMode sync,
3236                       bool has_mode, enum NewImageMode mode,
3237                       bool has_speed, int64_t speed,
3238                       bool has_bitmap, const char *bitmap,
3239                       bool has_on_source_error, BlockdevOnError on_source_error,
3240                       bool has_on_target_error, BlockdevOnError on_target_error,
3241                       Error **errp)
3242 {
3243     return do_drive_backup(device, target, has_format, format, sync,
3244                            has_mode, mode, has_speed, speed,
3245                            has_bitmap, bitmap,
3246                            has_on_source_error, on_source_error,
3247                            has_on_target_error, on_target_error,
3248                            NULL, errp);
3249 }
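
/*
 * Illustrative QMP usage (not part of the original source; device name and
 * target path are assumptions): take a full point-in-time copy of drive0
 * into a new qcow2 image:
 *
 *   { "execute": "drive-backup",
 *     "arguments": { "device": "drive0",
 *                    "target": "/srv/backups/drive0.qcow2",
 *                    "sync": "full", "format": "qcow2" } }
 */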
3250 
3251 BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp)
3252 {
3253     return bdrv_named_nodes_list(errp);
3254 }
3255 
3256 void do_blockdev_backup(const char *device, const char *target,
3257                          enum MirrorSyncMode sync,
3258                          bool has_speed, int64_t speed,
3259                          bool has_on_source_error,
3260                          BlockdevOnError on_source_error,
3261                          bool has_on_target_error,
3262                          BlockdevOnError on_target_error,
3263                          BlockJobTxn *txn, Error **errp)
3264 {
3265     BlockBackend *blk, *target_blk;
3266     BlockDriverState *bs;
3267     BlockDriverState *target_bs;
3268     Error *local_err = NULL;
3269     AioContext *aio_context;
3270 
3271     if (!has_speed) {
3272         speed = 0;
3273     }
3274     if (!has_on_source_error) {
3275         on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3276     }
3277     if (!has_on_target_error) {
3278         on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3279     }
3280 
3281     blk = blk_by_name(device);
3282     if (!blk) {
3283         error_setg(errp, "Device '%s' not found", device);
3284         return;
3285     }
3286 
3287     aio_context = blk_get_aio_context(blk);
3288     aio_context_acquire(aio_context);
3289 
3290     if (!blk_is_available(blk)) {
3291         error_setg(errp, "Device '%s' has no medium", device);
3292         goto out;
3293     }
3294     bs = blk_bs(blk);
3295 
3296     target_blk = blk_by_name(target);
3297     if (!target_blk) {
3298         error_setg(errp, "Device '%s' not found", target);
3299         goto out;
3300     }
3301 
3302     if (!blk_is_available(target_blk)) {
3303         error_setg(errp, "Device '%s' has no medium", target);
3304         goto out;
3305     }
3306     target_bs = blk_bs(target_blk);
3307 
3308     bdrv_ref(target_bs);
3309     bdrv_set_aio_context(target_bs, aio_context);
3310     backup_start(bs, target_bs, speed, sync, NULL, on_source_error,
3311                  on_target_error, block_job_cb, bs, txn, &local_err);
3312     if (local_err != NULL) {
3313         bdrv_unref(target_bs);
3314         error_propagate(errp, local_err);
3315     }
3316 out:
3317     aio_context_release(aio_context);
3318 }
3319 
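     /* QMP handler for the blockdev-backup command: do_blockdev_backup()
      * without a transaction. */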
3320 void qmp_blockdev_backup(const char *device, const char *target,
3321                          enum MirrorSyncMode sync,
3322                          bool has_speed, int64_t speed,
3323                          bool has_on_source_error,
3324                          BlockdevOnError on_source_error,
3325                          bool has_on_target_error,
3326                          BlockdevOnError on_target_error,
3327                          Error **errp)
3328 {
3329     do_blockdev_backup(device, target, sync, has_speed, speed,
3330                        has_on_source_error, on_source_error,
3331                        has_on_target_error, on_target_error,
3332                        NULL, errp);
3333 }
3334 
3335 /* Parameter validation and block job creation for mirroring.
3336  * Caller must hold the AioContext of @bs and @target (must be the same).
3337  */
3338 static void blockdev_mirror_common(BlockDriverState *bs,
3339                                    BlockDriverState *target,
3340                                    bool has_replaces, const char *replaces,
3341                                    enum MirrorSyncMode sync,
3342                                    bool has_speed, int64_t speed,
3343                                    bool has_granularity, uint32_t granularity,
3344                                    bool has_buf_size, int64_t buf_size,
3345                                    bool has_on_source_error,
3346                                    BlockdevOnError on_source_error,
3347                                    bool has_on_target_error,
3348                                    BlockdevOnError on_target_error,
3349                                    bool has_unmap, bool unmap,
3350                                    Error **errp)
3351 {
3352 
3353     if (!has_speed) {
3354         speed = 0;
3355     }
3356     if (!has_on_source_error) {
3357         on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3358     }
3359     if (!has_on_target_error) {
3360         on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3361     }
3362     if (!has_granularity) {
3363         granularity = 0;
3364     }
3365     if (!has_buf_size) {
3366         buf_size = 0;
3367     }
3368     if (!has_unmap) {
3369         unmap = true;
3370     }
3371 
3372     if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
3373         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
3374                    "a value in range [512B, 64MB]");
3375         return;
3376     }
3377     if (granularity & (granularity - 1)) {
3378         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
3379                    "power of 2");
3380         return;
3381     }
3382 
3383     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
3384         return;
3385     }
3386     if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) {
3387         return;
3388     }
3389     if (target->blk) {
3390         error_setg(errp, "Cannot mirror to an attached block device");
3391         return;
3392     }
3393 
3394     if (!bs->backing && sync == MIRROR_SYNC_MODE_TOP) {
3395         sync = MIRROR_SYNC_MODE_FULL;
3396     }
3397 
3398     /* Pass the node name to replace to mirror_start(): the loose coupling
3399      * lets it check whether that node still exists at mirror completion.
3400      */
3401     mirror_start(bs, target,
3402                  has_replaces ? replaces : NULL,
3403                  speed, granularity, buf_size, sync,
3404                  on_source_error, on_target_error, unmap,
3405                  block_job_cb, bs, errp);
3406 }
3407 
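     /*
      * QMP handler for drive-mirror: creates or opens the target image
      * (depending on @mode), moves it into the source's AioContext and hands
      * off to blockdev_mirror_common().  Illustrative QMP usage (device name
      * and target path are examples only):
      *   { "execute": "drive-mirror",
      *     "arguments": { "device": "drive0", "target": "/path/mirror.qcow2",
      *                    "sync": "full", "format": "qcow2" } }
      */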
3408 void qmp_drive_mirror(const char *device, const char *target,
3409                       bool has_format, const char *format,
3410                       bool has_node_name, const char *node_name,
3411                       bool has_replaces, const char *replaces,
3412                       enum MirrorSyncMode sync,
3413                       bool has_mode, enum NewImageMode mode,
3414                       bool has_speed, int64_t speed,
3415                       bool has_granularity, uint32_t granularity,
3416                       bool has_buf_size, int64_t buf_size,
3417                       bool has_on_source_error, BlockdevOnError on_source_error,
3418                       bool has_on_target_error, BlockdevOnError on_target_error,
3419                       bool has_unmap, bool unmap,
3420                       Error **errp)
3421 {
3422     BlockDriverState *bs;
3423     BlockBackend *blk;
3424     BlockDriverState *source, *target_bs;
3425     AioContext *aio_context;
3426     Error *local_err = NULL;
3427     QDict *options = NULL;
3428     int flags;
3429     int64_t size;
3430     int ret;
3431 
3432     blk = blk_by_name(device);
3433     if (!blk) {
3434         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3435                   "Device '%s' not found", device);
3436         return;
3437     }
3438 
3439     aio_context = blk_get_aio_context(blk);
3440     aio_context_acquire(aio_context);
3441 
3442     if (!blk_is_available(blk)) {
3443         error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
3444         goto out;
3445     }
3446     bs = blk_bs(blk);
3447     if (!has_mode) {
3448         mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
3449     }
3450 
3451     if (!has_format) {
3452         format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name;
3453     }
3454 
3455     flags = bs->open_flags | BDRV_O_RDWR;
3456     source = backing_bs(bs);
3457     if (!source && sync == MIRROR_SYNC_MODE_TOP) {
3458         sync = MIRROR_SYNC_MODE_FULL;
3459     }
3460     if (sync == MIRROR_SYNC_MODE_NONE) {
3461         source = bs;
3462     }
3463 
3464     size = bdrv_getlength(bs);
3465     if (size < 0) {
3466         error_setg_errno(errp, -size, "bdrv_getlength failed");
3467         goto out;
3468     }
3469 
3470     if (has_replaces) {
3471         BlockDriverState *to_replace_bs;
3472         AioContext *replace_aio_context;
3473         int64_t replace_size;
3474 
3475         if (!has_node_name) {
3476             error_setg(errp, "a node-name must be provided when replacing a"
3477                              " named node of the graph");
3478             goto out;
3479         }
3480 
3481         to_replace_bs = check_to_replace_node(bs, replaces, &local_err);
3482 
3483         if (!to_replace_bs) {
3484             error_propagate(errp, local_err);
3485             goto out;
3486         }
3487 
3488         replace_aio_context = bdrv_get_aio_context(to_replace_bs);
3489         aio_context_acquire(replace_aio_context);
3490         replace_size = bdrv_getlength(to_replace_bs);
3491         aio_context_release(replace_aio_context);
3492 
3493         if (size != replace_size) {
3494             error_setg(errp, "cannot replace image with a mirror image of "
3495                              "different size");
3496             goto out;
3497         }
3498     }
3499 
3500     if ((sync == MIRROR_SYNC_MODE_FULL || !source)
3501         && mode != NEW_IMAGE_MODE_EXISTING)
3502     {
3503         /* create new image w/o backing file */
3504         assert(format);
3505         bdrv_img_create(target, format,
3506                         NULL, NULL, NULL, size, flags, &local_err, false);
3507     } else {
3508         switch (mode) {
3509         case NEW_IMAGE_MODE_EXISTING:
3510             break;
3511         case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
3512             /* create new image with backing file */
3513             bdrv_img_create(target, format,
3514                             source->filename,
3515                             source->drv->format_name,
3516                             NULL, size, flags, &local_err, false);
3517             break;
3518         default:
3519             abort();
3520         }
3521     }
3522 
3523     if (local_err) {
3524         error_propagate(errp, local_err);
3525         goto out;
3526     }
3527 
3528     options = qdict_new();
3529     if (has_node_name) {
3530         qdict_put(options, "node-name", qstring_from_str(node_name));
3531     }
3532     if (format) {
3533         qdict_put(options, "driver", qstring_from_str(format));
3534     }
3535 
3536     /* Mirroring takes care of copy-on-write using the source's backing
3537      * file.
3538      */
3539     target_bs = NULL;
3540     ret = bdrv_open(&target_bs, target, NULL, options,
3541                     flags | BDRV_O_NO_BACKING, &local_err);
3542     if (ret < 0) {
3543         error_propagate(errp, local_err);
3544         goto out;
3545     }
3546 
3547     bdrv_set_aio_context(target_bs, aio_context);
3548 
3549     blockdev_mirror_common(bs, target_bs,
3550                            has_replaces, replaces, sync,
3551                            has_speed, speed,
3552                            has_granularity, granularity,
3553                            has_buf_size, buf_size,
3554                            has_on_source_error, on_source_error,
3555                            has_on_target_error, on_target_error,
3556                            has_unmap, unmap,
3557                            &local_err);
3558     if (local_err) {
3559         error_propagate(errp, local_err);
3560         bdrv_unref(target_bs);
3561     }
3562 out:
3563     aio_context_release(aio_context);
3564 }
3565 
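     /*
      * QMP handler for blockdev-mirror: unlike drive-mirror, the target is an
      * existing backend or node looked up with bdrv_lookup_bs(); no image file
      * is created here, and 'unmap' is always enabled.
      */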
3566 void qmp_blockdev_mirror(const char *device, const char *target,
3567                          bool has_replaces, const char *replaces,
3568                          MirrorSyncMode sync,
3569                          bool has_speed, int64_t speed,
3570                          bool has_granularity, uint32_t granularity,
3571                          bool has_buf_size, int64_t buf_size,
3572                          bool has_on_source_error,
3573                          BlockdevOnError on_source_error,
3574                          bool has_on_target_error,
3575                          BlockdevOnError on_target_error,
3576                          Error **errp)
3577 {
3578     BlockDriverState *bs;
3579     BlockBackend *blk;
3580     BlockDriverState *target_bs;
3581     AioContext *aio_context;
3582     Error *local_err = NULL;
3583 
3584     blk = blk_by_name(device);
3585     if (!blk) {
3586         error_setg(errp, "Device '%s' not found", device);
3587         return;
3588     }
3589     bs = blk_bs(blk);
3590 
3591     if (!bs) {
3592         error_setg(errp, "Device '%s' has no media", device);
3593         return;
3594     }
3595 
3596     target_bs = bdrv_lookup_bs(target, target, errp);
3597     if (!target_bs) {
3598         return;
3599     }
3600 
3601     aio_context = bdrv_get_aio_context(bs);
3602     aio_context_acquire(aio_context);
3603 
3604     bdrv_ref(target_bs);
3605     bdrv_set_aio_context(target_bs, aio_context);
3606 
3607     blockdev_mirror_common(bs, target_bs,
3608                            has_replaces, replaces, sync,
3609                            has_speed, speed,
3610                            has_granularity, granularity,
3611                            has_buf_size, buf_size,
3612                            has_on_source_error, on_source_error,
3613                            has_on_target_error, on_target_error,
3614                            true, true,
3615                            &local_err);
3616     if (local_err) {
3617         error_propagate(errp, local_err);
3618         bdrv_unref(target_bs);
3619     }
3620 
3621     aio_context_release(aio_context);
3622 }
3623 
3624 /* Get the block job for a given device name and acquire its AioContext */
3625 static BlockJob *find_block_job(const char *device, AioContext **aio_context,
3626                                 Error **errp)
3627 {
3628     BlockBackend *blk;
3629     BlockDriverState *bs;
3630 
3631     *aio_context = NULL;
3632 
3633     blk = blk_by_name(device);
3634     if (!blk) {
3635         goto notfound;
3636     }
3637 
3638     *aio_context = blk_get_aio_context(blk);
3639     aio_context_acquire(*aio_context);
3640 
3641     if (!blk_is_available(blk)) {
3642         goto notfound;
3643     }
3644     bs = blk_bs(blk);
3645 
3646     if (!bs->job) {
3647         goto notfound;
3648     }
3649 
3650     return bs->job;
3651 
3652 notfound:
3653     error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
3654               "No active block job on device '%s'", device);
3655     if (*aio_context) {
3656         aio_context_release(*aio_context);
3657         *aio_context = NULL;
3658     }
3659     return NULL;
3660 }
3661 
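     /* QMP handler for block-job-set-speed: adjusts the rate limit of the job
      * running on @device. */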
3662 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
3663 {
3664     AioContext *aio_context;
3665     BlockJob *job = find_block_job(device, &aio_context, errp);
3666 
3667     if (!job) {
3668         return;
3669     }
3670 
3671     block_job_set_speed(job, speed, errp);
3672     aio_context_release(aio_context);
3673 }
3674 
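     /* QMP handler for block-job-cancel: refuses to cancel a user-paused job
      * unless @force is true. */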
3675 void qmp_block_job_cancel(const char *device,
3676                           bool has_force, bool force, Error **errp)
3677 {
3678     AioContext *aio_context;
3679     BlockJob *job = find_block_job(device, &aio_context, errp);
3680 
3681     if (!job) {
3682         return;
3683     }
3684 
3685     if (!has_force) {
3686         force = false;
3687     }
3688 
3689     if (job->user_paused && !force) {
3690         error_setg(errp, "The block job for device '%s' is currently paused",
3691                    device);
3692         goto out;
3693     }
3694 
3695     trace_qmp_block_job_cancel(job);
3696     block_job_cancel(job);
3697 out:
3698     aio_context_release(aio_context);
3699 }
3700 
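     /* QMP handler for block-job-pause: does nothing if the job is already
      * user-paused. */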
3701 void qmp_block_job_pause(const char *device, Error **errp)
3702 {
3703     AioContext *aio_context;
3704     BlockJob *job = find_block_job(device, &aio_context, errp);
3705 
3706     if (!job || job->user_paused) {
3707         return;
3708     }
3709 
3710     job->user_paused = true;
3711     trace_qmp_block_job_pause(job);
3712     block_job_pause(job);
3713     aio_context_release(aio_context);
3714 }
3715 
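     /* QMP handler for block-job-resume: only acts on jobs that were
      * user-paused. */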
3716 void qmp_block_job_resume(const char *device, Error **errp)
3717 {
3718     AioContext *aio_context;
3719     BlockJob *job = find_block_job(device, &aio_context, errp);
3720 
3721     if (!job || !job->user_paused) {
3722         return;
3723     }
3724 
3725     job->user_paused = false;
3726     trace_qmp_block_job_resume(job);
3727     block_job_resume(job);
3728     aio_context_release(aio_context);
3729 }
3730 
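     /* QMP handler for block-job-complete: requests graceful completion of the
      * job on @device via block_job_complete(). */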
3731 void qmp_block_job_complete(const char *device, Error **errp)
3732 {
3733     AioContext *aio_context;
3734     BlockJob *job = find_block_job(device, &aio_context, errp);
3735 
3736     if (!job) {
3737         return;
3738     }
3739 
3740     trace_qmp_block_job_complete(job);
3741     block_job_complete(job, errp);
3742     aio_context_release(aio_context);
3743 }
3744 
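     /*
      * QMP handler for change-backing-file: rewrites the backing file string
      * recorded in the image header of @image_node_name, which must be part of
      * @device's chain, temporarily reopening the image read-write if needed.
      */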
3745 void qmp_change_backing_file(const char *device,
3746                              const char *image_node_name,
3747                              const char *backing_file,
3748                              Error **errp)
3749 {
3750     BlockBackend *blk;
3751     BlockDriverState *bs = NULL;
3752     AioContext *aio_context;
3753     BlockDriverState *image_bs = NULL;
3754     Error *local_err = NULL;
3755     bool ro;
3756     int open_flags;
3757     int ret;
3758 
3759     blk = blk_by_name(device);
3760     if (!blk) {
3761         error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3762                   "Device '%s' not found", device);
3763         return;
3764     }
3765 
3766     aio_context = blk_get_aio_context(blk);
3767     aio_context_acquire(aio_context);
3768 
3769     if (!blk_is_available(blk)) {
3770         error_setg(errp, "Device '%s' has no medium", device);
3771         goto out;
3772     }
3773     bs = blk_bs(blk);
3774 
3775     image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err);
3776     if (local_err) {
3777         error_propagate(errp, local_err);
3778         goto out;
3779     }
3780 
3781     if (!image_bs) {
3782         error_setg(errp, "image file not found");
3783         goto out;
3784     }
3785 
3786     if (bdrv_find_base(image_bs) == image_bs) {
3787         error_setg(errp, "not allowing backing file change on an image "
3788                          "without a backing file");
3789         goto out;
3790     }
3791 
3792     /* even though we are not necessarily operating on bs, we need it to
3793      * determine if block ops are currently prohibited on the chain */
3794     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
3795         goto out;
3796     }
3797 
3798     /* final sanity check */
3799     if (!bdrv_chain_contains(bs, image_bs)) {
3800         error_setg(errp, "'%s' and image file are not in the same chain",
3801                    device);
3802         goto out;
3803     }
3804 
3805     /* if not r/w, reopen to make r/w */
3806     open_flags = image_bs->open_flags;
3807     ro = bdrv_is_read_only(image_bs);
3808 
3809     if (ro) {
3810         bdrv_reopen(image_bs, open_flags | BDRV_O_RDWR, &local_err);
3811         if (local_err) {
3812             error_propagate(errp, local_err);
3813             goto out;
3814         }
3815     }
3816 
3817     ret = bdrv_change_backing_file(image_bs, backing_file,
3818                                image_bs->drv ? image_bs->drv->format_name : "");
3819 
3820     if (ret < 0) {
3821         error_setg_errno(errp, -ret, "Could not change backing file to '%s'",
3822                          backing_file);
3823         /* don't exit here, so we can try to restore open flags if
3824          * appropriate */
3825     }
3826 
3827     if (ro) {
3828         bdrv_reopen(image_bs, open_flags, &local_err);
3829         if (local_err) {
3830             error_propagate(errp, local_err); /* will preserve prior errp */
3831         }
3832     }
3833 
3834 out:
3835     aio_context_release(aio_context);
3836 }
3837 
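     /*
      * QMP handler for blockdev-add: converts the QAPI options back into a
      * flattened QDict, then either creates a BlockBackend (when 'id' is
      * given) or a monitor-owned node (when only 'node-name' is given).
      * Encrypted images are rejected.
      */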
3838 void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
3839 {
3840     QmpOutputVisitor *ov = qmp_output_visitor_new();
3841     BlockDriverState *bs;
3842     BlockBackend *blk = NULL;
3843     QObject *obj;
3844     QDict *qdict;
3845     Error *local_err = NULL;
3846 
3847     /* TODO Sort it out in raw-posix and drive_new(): Reject aio=native with
3848      * cache.direct=false instead of silently switching to aio=threads, except
3849      * when called from drive_new().
3850      *
3851      * For now, simply forbidding the combination for all drivers will do. */
3852     if (options->has_aio && options->aio == BLOCKDEV_AIO_OPTIONS_NATIVE) {
3853         bool direct = options->has_cache &&
3854                       options->cache->has_direct &&
3855                       options->cache->direct;
3856         if (!direct) {
3857             error_setg(errp, "aio=native requires cache.direct=true");
3858             goto fail;
3859         }
3860     }
3861 
3862     visit_type_BlockdevOptions(qmp_output_get_visitor(ov),
3863                                &options, NULL, &local_err);
3864     if (local_err) {
3865         error_propagate(errp, local_err);
3866         goto fail;
3867     }
3868 
3869     obj = qmp_output_get_qobject(ov);
3870     qdict = qobject_to_qdict(obj);
3871 
3872     qdict_flatten(qdict);
3873 
3874     if (options->has_id) {
3875         blk = blockdev_init(NULL, qdict, &local_err);
3876         if (local_err) {
3877             error_propagate(errp, local_err);
3878             goto fail;
3879         }
3880 
3881         bs = blk_bs(blk);
3882     } else {
3883         if (!qdict_get_try_str(qdict, "node-name")) {
3884             error_setg(errp, "'id' and/or 'node-name' need to be specified for "
3885                        "the root node");
3886             goto fail;
3887         }
3888 
3889         bs = bds_tree_init(qdict, errp);
3890         if (!bs) {
3891             goto fail;
3892         }
3893 
3894         QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list);
3895     }
3896 
3897     if (bs && bdrv_key_required(bs)) {
3898         if (blk) {
3899             blk_unref(blk);
3900         } else {
3901             QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
3902             bdrv_unref(bs);
3903         }
3904         error_setg(errp, "blockdev-add doesn't support encrypted devices");
3905         goto fail;
3906     }
3907 
3908 fail:
3909     qmp_output_visitor_cleanup(ov);
3910 }
3911 
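     /*
      * QMP handler for x-blockdev-del: deletes a block backend (by 'id') or a
      * monitor-owned node (by 'node-name'), but only if nothing else holds a
      * reference to it.
      */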
3912 void qmp_x_blockdev_del(bool has_id, const char *id,
3913                         bool has_node_name, const char *node_name, Error **errp)
3914 {
3915     AioContext *aio_context;
3916     BlockBackend *blk;
3917     BlockDriverState *bs;
3918 
3919     if (has_id && has_node_name) {
3920         error_setg(errp, "Only one of id and node-name must be specified");
3921         return;
3922     } else if (!has_id && !has_node_name) {
3923         error_setg(errp, "No block device specified");
3924         return;
3925     }
3926 
3927     if (has_id) {
3928         blk = blk_by_name(id);
3929         if (!blk) {
3930             error_setg(errp, "Cannot find block backend %s", id);
3931             return;
3932         }
3933         if (blk_get_refcnt(blk) > 1) {
3934             error_setg(errp, "Block backend %s is in use", id);
3935             return;
3936         }
3937         bs = blk_bs(blk);
3938         aio_context = blk_get_aio_context(blk);
3939     } else {
3940         bs = bdrv_find_node(node_name);
3941         if (!bs) {
3942             error_setg(errp, "Cannot find node %s", node_name);
3943             return;
3944         }
3945         blk = bs->blk;
3946         if (blk) {
3947             error_setg(errp, "Node %s is in use by %s",
3948                        node_name, blk_name(blk));
3949             return;
3950         }
3951         aio_context = bdrv_get_aio_context(bs);
3952     }
3953 
3954     aio_context_acquire(aio_context);
3955 
3956     if (bs) {
3957         if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) {
3958             goto out;
3959         }
3960 
3961         if (!blk && !bs->monitor_list.tqe_prev) {
3962             error_setg(errp, "Node %s is not owned by the monitor",
3963                        bs->node_name);
3964             goto out;
3965         }
3966 
3967         if (bs->refcnt > 1) {
3968             error_setg(errp, "Block device %s is in use",
3969                        bdrv_get_device_or_node_name(bs));
3970             goto out;
3971         }
3972     }
3973 
3974     if (blk) {
3975         blk_unref(blk);
3976     } else {
3977         QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
3978         bdrv_unref(bs);
3979     }
3980 
3981 out:
3982     aio_context_release(aio_context);
3983 }
3984 
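     /* QMP handler for query-block-jobs: collects block_job_query() info for
      * every BlockDriverState that currently has a job attached. */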
3985 BlockJobInfoList *qmp_query_block_jobs(Error **errp)
3986 {
3987     BlockJobInfoList *head = NULL, **p_next = &head;
3988     BlockDriverState *bs;
3989 
3990     for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
3991         AioContext *aio_context = bdrv_get_aio_context(bs);
3992 
3993         aio_context_acquire(aio_context);
3994 
3995         if (bs->job) {
3996             BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1);
3997             elem->value = block_job_query(bs->job);
3998             *p_next = elem;
3999             p_next = &elem->next;
4000         }
4001 
4002         aio_context_release(aio_context);
4003     }
4004 
4005     return head;
4006 }
4007 
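     /* Common -drive options, including the throttling.* I/O limit settings. */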
4008 QemuOptsList qemu_common_drive_opts = {
4009     .name = "drive",
4010     .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
4011     .desc = {
4012         {
4013             .name = "snapshot",
4014             .type = QEMU_OPT_BOOL,
4015             .help = "enable/disable snapshot mode",
4016         },{
4017             .name = "discard",
4018             .type = QEMU_OPT_STRING,
4019             .help = "discard operation (ignore/off, unmap/on)",
4020         },{
4021             .name = "aio",
4022             .type = QEMU_OPT_STRING,
4023             .help = "host AIO implementation (threads, native)",
4024         },{
4025             .name = "format",
4026             .type = QEMU_OPT_STRING,
4027             .help = "disk format (raw, qcow2, ...)",
4028         },{
4029             .name = "rerror",
4030             .type = QEMU_OPT_STRING,
4031             .help = "read error action",
4032         },{
4033             .name = "werror",
4034             .type = QEMU_OPT_STRING,
4035             .help = "write error action",
4036         },{
4037             .name = "read-only",
4038             .type = QEMU_OPT_BOOL,
4039             .help = "open drive file as read-only",
4040         },{
4041             .name = "throttling.iops-total",
4042             .type = QEMU_OPT_NUMBER,
4043             .help = "limit total I/O operations per second",
4044         },{
4045             .name = "throttling.iops-read",
4046             .type = QEMU_OPT_NUMBER,
4047             .help = "limit read operations per second",
4048         },{
4049             .name = "throttling.iops-write",
4050             .type = QEMU_OPT_NUMBER,
4051             .help = "limit write operations per second",
4052         },{
4053             .name = "throttling.bps-total",
4054             .type = QEMU_OPT_NUMBER,
4055             .help = "limit total bytes per second",
4056         },{
4057             .name = "throttling.bps-read",
4058             .type = QEMU_OPT_NUMBER,
4059             .help = "limit read bytes per second",
4060         },{
4061             .name = "throttling.bps-write",
4062             .type = QEMU_OPT_NUMBER,
4063             .help = "limit write bytes per second",
4064         },{
4065             .name = "throttling.iops-total-max",
4066             .type = QEMU_OPT_NUMBER,
4067             .help = "I/O operations burst",
4068         },{
4069             .name = "throttling.iops-read-max",
4070             .type = QEMU_OPT_NUMBER,
4071             .help = "I/O operations read burst",
4072         },{
4073             .name = "throttling.iops-write-max",
4074             .type = QEMU_OPT_NUMBER,
4075             .help = "I/O operations write burst",
4076         },{
4077             .name = "throttling.bps-total-max",
4078             .type = QEMU_OPT_NUMBER,
4079             .help = "total bytes burst",
4080         },{
4081             .name = "throttling.bps-read-max",
4082             .type = QEMU_OPT_NUMBER,
4083             .help = "total bytes read burst",
4084         },{
4085             .name = "throttling.bps-write-max",
4086             .type = QEMU_OPT_NUMBER,
4087             .help = "total bytes write burst",
4088         },{
4089             .name = "throttling.iops-size",
4090             .type = QEMU_OPT_NUMBER,
4091             .help = "when limiting by iops max size of an I/O in bytes",
4092         },{
4093             .name = "throttling.group",
4094             .type = QEMU_OPT_STRING,
4095             .help = "name of the block throttling group",
4096         },{
4097             .name = "copy-on-read",
4098             .type = QEMU_OPT_BOOL,
4099             .help = "copy read data from backing file into image file",
4100         },{
4101             .name = "detect-zeroes",
4102             .type = QEMU_OPT_STRING,
4103             .help = "try to optimize zero writes (off, on, unmap)",
4104         },{
4105             .name = "stats-account-invalid",
4106             .type = QEMU_OPT_BOOL,
4107             .help = "whether to account for invalid I/O operations "
4108                     "in the statistics",
4109         },{
4110             .name = "stats-account-failed",
4111             .type = QEMU_OPT_BOOL,
4112             .help = "whether to account for failed I/O operations "
4113                     "in the statistics",
4114         },
4115         { /* end of list */ }
4116     },
4117 };
4118 
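     /* Options accepted for a root block driver state ("root-bds"), a subset
      * of the common drive options. */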
4119 static QemuOptsList qemu_root_bds_opts = {
4120     .name = "root-bds",
4121     .head = QTAILQ_HEAD_INITIALIZER(qemu_root_bds_opts.head),
4122     .desc = {
4123         {
4124             .name = "discard",
4125             .type = QEMU_OPT_STRING,
4126             .help = "discard operation (ignore/off, unmap/on)",
4127         },{
4128             .name = "aio",
4129             .type = QEMU_OPT_STRING,
4130             .help = "host AIO implementation (threads, native)",
4131         },{
4132             .name = "read-only",
4133             .type = QEMU_OPT_BOOL,
4134             .help = "open drive file as read-only",
4135         },{
4136             .name = "copy-on-read",
4137             .type = QEMU_OPT_BOOL,
4138             .help = "copy read data from backing file into image file",
4139         },{
4140             .name = "detect-zeroes",
4141             .type = QEMU_OPT_STRING,
4142             .help = "try to optimize zero writes (off, on, unmap)",
4143         },
4144         { /* end of list */ }
4145     },
4146 };
4147 
4148 QemuOptsList qemu_drive_opts = {
4149     .name = "drive",
4150     .head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head),
4151     .desc = {
4152         /*
4153          * no elements => accept any params
4154          * validation will happen later
4155          */
4156         { /* end of list */ }
4157     },
4158 };
4159