xref: /openbmc/qemu/block/qapi-sysemu.c (revision 99b16e8e)
/*
 * QMP command handlers specific to the system emulators
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "block/block_int.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"

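/*
 * Resolve a BlockBackend from either a legacy block device name or a qdev
 * device id.  Exactly one of @blk_name and @qdev_id must be given; this
 * mirrors the QMP-level rule that the 'device' and 'id' arguments of the
 * commands below are mutually exclusive.
 */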
static BlockBackend *qmp_get_blk(const char *blk_name, const char *qdev_id,
                                 Error **errp)
{
    BlockBackend *blk;

    if (!blk_name == !qdev_id) {
        error_setg(errp, "Need exactly one of 'device' and 'id'");
        return NULL;
    }

    if (qdev_id) {
        blk = blk_by_qdev_id(qdev_id, errp);
    } else {
        blk = blk_by_name(blk_name);
        if (blk == NULL) {
            error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
                      "Device '%s' not found", blk_name);
        }
    }

    return blk;
}

/*
 * Attempt to open the tray of @device.
 * If @force, ignore its tray lock.
 * Else, if the tray is locked, don't open it, but ask the guest to open it.
 * On error, store an error through @errp and return -errno.
 * If @device does not exist, return -ENODEV.
 * If it has no removable media, return -ENOTSUP.
 * If it has no tray, return -ENOSYS.
 * If the guest was asked to open the tray, return -EINPROGRESS.
 * Else, return 0.
 */
static int do_open_tray(const char *blk_name, const char *qdev_id,
                        bool force, Error **errp)
{
    BlockBackend *blk;
    const char *device = qdev_id ?: blk_name;
    bool locked;

    blk = qmp_get_blk(blk_name, qdev_id, errp);
    if (!blk) {
        return -ENODEV;
    }

    if (!blk_dev_has_removable_media(blk)) {
        error_setg(errp, "Device '%s' is not removable", device);
        return -ENOTSUP;
    }

    if (!blk_dev_has_tray(blk)) {
        error_setg(errp, "Device '%s' does not have a tray", device);
        return -ENOSYS;
    }

    if (blk_dev_is_tray_open(blk)) {
        return 0;
    }

    locked = blk_dev_is_medium_locked(blk);
    if (locked) {
        blk_dev_eject_request(blk, force);
    }

    if (!locked || force) {
        blk_dev_change_media_cb(blk, false, &error_abort);
    }

    if (locked && !force) {
        error_setg(errp, "Device '%s' is locked and force was not specified, "
                   "wait for tray to open and try again", device);
        return -EINPROGRESS;
    }

    return 0;
}

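/*
 * Handler for the QMP command 'blockdev-open-tray'.  Illustrative wire
 * example (the device id is hypothetical, not taken from this file):
 *
 * -> { "execute": "blockdev-open-tray",
 *      "arguments": { "id": "ide0-cd0", "force": false } }
 * <- { "return": {} }
 *
 * Note that -ENOSYS (no tray) and -EINPROGRESS (guest asked to open a
 * locked tray) from do_open_tray() are not reported as errors here.
 */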
void qmp_blockdev_open_tray(const char *device,
                            const char *id,
                            bool has_force, bool force,
                            Error **errp)
{
    Error *local_err = NULL;
    int rc;

    if (!has_force) {
        force = false;
    }
    rc = do_open_tray(device, id, force, &local_err);
    if (rc && rc != -ENOSYS && rc != -EINPROGRESS) {
        error_propagate(errp, local_err);
        return;
    }
    error_free(local_err);
}

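/*
 * Handler for the QMP command 'blockdev-close-tray'.  Illustrative wire
 * example (the device id is hypothetical):
 *
 * -> { "execute": "blockdev-close-tray", "arguments": { "id": "ide0-cd0" } }
 * <- { "return": {} }
 *
 * The command is silently ignored for tray-less devices and for trays that
 * are already closed.
 */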
void qmp_blockdev_close_tray(const char *device,
                             const char *id,
                             Error **errp)
{
    BlockBackend *blk;
    Error *local_err = NULL;

    blk = qmp_get_blk(device, id, errp);
    if (!blk) {
        return;
    }

    if (!blk_dev_has_removable_media(blk)) {
        error_setg(errp, "Device '%s' is not removable", device ?: id);
        return;
    }

    if (!blk_dev_has_tray(blk)) {
        /* Ignore this command on tray-less devices */
        return;
    }

    if (!blk_dev_is_tray_open(blk)) {
        return;
    }

    blk_dev_change_media_cb(blk, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static void GRAPH_UNLOCKED
blockdev_remove_medium(const char *device, const char *id, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    AioContext *aio_context;
    bool has_attached_device;

    GLOBAL_STATE_CODE();

    blk = qmp_get_blk(device, id, errp);
    if (!blk) {
        return;
    }

    /* For BBs without a device, we can exchange the BDS tree at will */
    has_attached_device = blk_get_attached_dev(blk);

    if (has_attached_device && !blk_dev_has_removable_media(blk)) {
        error_setg(errp, "Device '%s' is not removable", device ?: id);
        return;
    }

    if (has_attached_device && blk_dev_has_tray(blk) &&
        !blk_dev_is_tray_open(blk))
    {
        error_setg(errp, "Tray of device '%s' is not open", device ?: id);
        return;
    }

    bs = blk_bs(blk);
    if (!bs) {
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    bdrv_graph_rdlock_main_loop();
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
        bdrv_graph_rdunlock_main_loop();
        goto out;
    }
    bdrv_graph_rdunlock_main_loop();

    blk_remove_bs(blk);

    if (!blk_dev_has_tray(blk)) {
        /* For tray-less devices, blockdev-open-tray is a no-op (or may not be
         * called at all); therefore, the medium needs to be ejected here.
         * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load
         * value passed here (i.e. false). */
        blk_dev_change_media_cb(blk, false, &error_abort);
    }

out:
    aio_context_release(aio_context);
}

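/*
 * Handler for the QMP command 'blockdev-remove-medium', which only accepts
 * a qdev id.  Illustrative wire example (the device id is hypothetical):
 *
 * -> { "execute": "blockdev-remove-medium",
 *      "arguments": { "id": "ide0-cd0" } }
 * <- { "return": {} }
 */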
void qmp_blockdev_remove_medium(const char *id, Error **errp)
{
    blockdev_remove_medium(NULL, id, errp);
}

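/*
 * Insert the already opened, not yet attached BDS @bs as the medium of @blk.
 * This is the common path shared by blockdev-insert-medium and
 * blockdev-change-medium; callers are expected to have verified that @bs is
 * not attached to another BlockBackend yet.
 */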
static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
                                            BlockDriverState *bs, Error **errp)
{
    Error *local_err = NULL;
    AioContext *ctx;
    bool has_device;
    int ret;

    /* For BBs without a device, we can exchange the BDS tree at will */
    has_device = blk_get_attached_dev(blk);

    if (has_device && !blk_dev_has_removable_media(blk)) {
        error_setg(errp, "Device is not removable");
        return;
    }

    if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
        error_setg(errp, "Tray of the device is not open");
        return;
    }

    if (blk_bs(blk)) {
        error_setg(errp, "There already is a medium in the device");
        return;
    }

    ctx = bdrv_get_aio_context(bs);
    aio_context_acquire(ctx);
    ret = blk_insert_bs(blk, bs, errp);
    aio_context_release(ctx);

    if (ret < 0) {
        return;
    }

    if (!blk_dev_has_tray(blk)) {
        /* For tray-less devices, blockdev-close-tray is a no-op (or may not be
         * called at all); therefore, the medium needs to be pushed into the
         * slot here.
         * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load
         * value passed here (i.e. true). */
        blk_dev_change_media_cb(blk, true, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            blk_remove_bs(blk);
            return;
        }
    }
}

static void blockdev_insert_medium(const char *device, const char *id,
                                   const char *node_name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    blk = qmp_get_blk(device, id, errp);
    if (!blk) {
        return;
    }

    bs = bdrv_find_node(node_name);
    if (!bs) {
        error_setg(errp, "Node '%s' not found", node_name);
        return;
    }

    if (bdrv_has_blk(bs)) {
        error_setg(errp, "Node '%s' is already in use", node_name);
        return;
    }

    qmp_blockdev_insert_anon_medium(blk, bs, errp);
}

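/*
 * Handler for the QMP command 'blockdev-insert-medium'.  Illustrative wire
 * example (the device id and node name are hypothetical):
 *
 * -> { "execute": "blockdev-insert-medium",
 *      "arguments": { "id": "ide0-cd0", "node-name": "cd-image" } }
 * <- { "return": {} }
 */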
void qmp_blockdev_insert_medium(const char *id, const char *node_name,
                                Error **errp)
{
    blockdev_insert_medium(NULL, id, node_name, errp);
}

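/*
 * Handler for the QMP command 'blockdev-change-medium': open the new image,
 * open the tray (if any), remove the old medium, insert the new one and
 * close the tray again.  Illustrative wire example (the device id and the
 * image path are hypothetical):
 *
 * -> { "execute": "blockdev-change-medium",
 *      "arguments": { "id": "ide0-cd0",
 *                     "filename": "/tmp/new.iso",
 *                     "format": "raw" } }
 * <- { "return": {} }
 */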
void qmp_blockdev_change_medium(const char *device,
                                const char *id,
                                const char *filename,
                                const char *format,
                                bool has_force, bool force,
                                bool has_read_only,
                                BlockdevChangeReadOnlyMode read_only,
                                Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *medium_bs = NULL;
    int bdrv_flags;
    bool detect_zeroes;
    int rc;
    QDict *options = NULL;
    Error *err = NULL;

    blk = qmp_get_blk(device, id, errp);
    if (!blk) {
        goto fail;
    }

    if (blk_bs(blk)) {
        blk_update_root_state(blk);
    }

    bdrv_flags = blk_get_open_flags_from_root_state(blk);
    bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING |
        BDRV_O_PROTOCOL | BDRV_O_AUTO_RDONLY);

    if (!has_read_only) {
        read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN;
    }

    switch (read_only) {
    case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN:
        break;

    case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY:
        bdrv_flags &= ~BDRV_O_RDWR;
        break;

    case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE:
        bdrv_flags |= BDRV_O_RDWR;
        break;

    default:
        abort();
    }

    options = qdict_new();
    detect_zeroes = blk_get_detect_zeroes_from_root_state(blk);
    qdict_put_str(options, "detect-zeroes", detect_zeroes ? "on" : "off");

    if (format) {
        qdict_put_str(options, "driver", format);
    }

    aio_context_acquire(qemu_get_aio_context());
    medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp);
    aio_context_release(qemu_get_aio_context());

    if (!medium_bs) {
        goto fail;
    }

    rc = do_open_tray(device, id, force, &err);
    if (rc && rc != -ENOSYS) {
        error_propagate(errp, err);
        goto fail;
    }
    error_free(err);
    err = NULL;

    blockdev_remove_medium(device, id, &err);
    if (err) {
        error_propagate(errp, err);
        goto fail;
    }

    qmp_blockdev_insert_anon_medium(blk, medium_bs, &err);
    if (err) {
        error_propagate(errp, err);
        goto fail;
    }

    qmp_blockdev_close_tray(device, id, errp);

fail:
    /* If the medium has been inserted, the device has its own reference, so
     * ours must be relinquished; and if it has not been inserted successfully,
     * the reference must be relinquished anyway */
    bdrv_unref(medium_bs);
}

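/*
 * Handler for the QMP command 'eject': open the tray (unless the device has
 * none) and remove the medium.  Illustrative wire example (the device id is
 * hypothetical):
 *
 * -> { "execute": "eject", "arguments": { "id": "ide0-cd0", "force": true } }
 * <- { "return": {} }
 */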
void qmp_eject(const char *device, const char *id,
               bool has_force, bool force, Error **errp)
{
    Error *local_err = NULL;
    int rc;

    if (!has_force) {
        force = false;
    }

    rc = do_open_tray(device, id, force, &local_err);
    if (rc && rc != -ENOSYS) {
        error_propagate(errp, local_err);
        return;
    }
    error_free(local_err);

    blockdev_remove_medium(device, id, errp);
}

/* throttling disk I/O limits */
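/*
 * Handler for the QMP command 'block_set_io_throttle'.  The six base limits
 * are mandatory; setting all of them to 0 disables throttling for the device.
 * Illustrative wire example capping a (hypothetical) drive at 1 MB/s and
 * 100 IOPS:
 *
 * -> { "execute": "block_set_io_throttle",
 *      "arguments": { "id": "virtio0",
 *                     "bps": 1048576, "bps_rd": 0, "bps_wr": 0,
 *                     "iops": 100, "iops_rd": 0, "iops_wr": 0 } }
 * <- { "return": {} }
 */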
void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
{
    ThrottleConfig cfg;
    BlockDriverState *bs;
    BlockBackend *blk;
    AioContext *aio_context;

    blk = qmp_get_blk(arg->device, arg->id, errp);
    if (!blk) {
        return;
    }

    aio_context = blk_get_aio_context(blk);
    aio_context_acquire(aio_context);

    bs = blk_bs(blk);
    if (!bs) {
        error_setg(errp, "Device has no medium");
        goto out;
    }

    throttle_config_init(&cfg);
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps;
    cfg.buckets[THROTTLE_BPS_READ].avg  = arg->bps_rd;
    cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr;

    cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops;
    cfg.buckets[THROTTLE_OPS_READ].avg  = arg->iops_rd;
    cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr;

    if (arg->has_bps_max) {
        cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max;
    }
    if (arg->has_bps_rd_max) {
        cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max;
    }
    if (arg->has_bps_wr_max) {
        cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max;
    }
    if (arg->has_iops_max) {
        cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max;
    }
    if (arg->has_iops_rd_max) {
        cfg.buckets[THROTTLE_OPS_READ].max = arg->iops_rd_max;
    }
    if (arg->has_iops_wr_max) {
        cfg.buckets[THROTTLE_OPS_WRITE].max = arg->iops_wr_max;
    }

    if (arg->has_bps_max_length) {
        cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_max_length;
    }
    if (arg->has_bps_rd_max_length) {
        cfg.buckets[THROTTLE_BPS_READ].burst_length = arg->bps_rd_max_length;
    }
    if (arg->has_bps_wr_max_length) {
        cfg.buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_wr_max_length;
    }
    if (arg->has_iops_max_length) {
        cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_max_length;
    }
    if (arg->has_iops_rd_max_length) {
        cfg.buckets[THROTTLE_OPS_READ].burst_length = arg->iops_rd_max_length;
    }
    if (arg->has_iops_wr_max_length) {
        cfg.buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_wr_max_length;
    }

    if (arg->has_iops_size) {
        cfg.op_size = arg->iops_size;
    }

    if (!throttle_is_valid(&cfg, errp)) {
        goto out;
    }

    if (throttle_enabled(&cfg)) {
        /* Enable I/O limits if they're not enabled yet, otherwise
         * just update the throttling group. */
        if (!blk_get_public(blk)->throttle_group_member.throttle_state) {
            blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id);
        } else if (arg->group) {
            blk_io_limits_update_group(blk, arg->group);
        }
        /* Set the new throttling configuration */
        blk_set_io_limits(blk, &cfg);
    } else if (blk_get_public(blk)->throttle_group_member.throttle_state) {
        /* If all throttling settings are set to 0, disable I/O limits */
        blk_io_limits_disable(blk);
    }

out:
    aio_context_release(aio_context);
}

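/*
 * Handler for the QMP command 'block-latency-histogram-set'.  Illustrative
 * wire example (the device id is hypothetical), setting the intervals
 * [0, 10ms), [10ms, 50ms) and [50ms, +inf) in nanoseconds for all request
 * types of the device:
 *
 * -> { "execute": "block-latency-histogram-set",
 *      "arguments": { "id": "drive0",
 *                     "boundaries": [ 10000000, 50000000 ] } }
 * <- { "return": {} }
 *
 * Calling the command with only "id" clears all histograms of the device.
 */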
void qmp_block_latency_histogram_set(
    const char *id,
    bool has_boundaries, uint64List *boundaries,
    bool has_boundaries_read, uint64List *boundaries_read,
    bool has_boundaries_write, uint64List *boundaries_write,
    bool has_boundaries_append, uint64List *boundaries_append,
    bool has_boundaries_flush, uint64List *boundaries_flush,
    Error **errp)
{
    BlockBackend *blk = qmp_get_blk(NULL, id, errp);
    BlockAcctStats *stats;
    int ret;

    if (!blk) {
        return;
    }

    stats = blk_get_stats(blk);

    if (!has_boundaries && !has_boundaries_read && !has_boundaries_write &&
        !has_boundaries_append && !has_boundaries_flush)
    {
        block_latency_histograms_clear(stats);
        return;
    }

    if (has_boundaries || has_boundaries_read) {
        ret = block_latency_histogram_set(
            stats, BLOCK_ACCT_READ,
            has_boundaries_read ? boundaries_read : boundaries);
        if (ret) {
            error_setg(errp, "Device '%s': failed to set read boundaries", id);
            return;
        }
    }

    if (has_boundaries || has_boundaries_write) {
        ret = block_latency_histogram_set(
            stats, BLOCK_ACCT_WRITE,
            has_boundaries_write ? boundaries_write : boundaries);
        if (ret) {
            error_setg(errp, "Device '%s': failed to set write boundaries", id);
            return;
        }
    }

    if (has_boundaries || has_boundaries_append) {
        ret = block_latency_histogram_set(
            stats, BLOCK_ACCT_ZONE_APPEND,
            has_boundaries_append ? boundaries_append : boundaries);
        if (ret) {
            error_setg(errp,
                       "Device '%s': failed to set append write boundaries",
                       id);
            return;
        }
    }

    if (has_boundaries || has_boundaries_flush) {
        ret = block_latency_histogram_set(
            stats, BLOCK_ACCT_FLUSH,
            has_boundaries_flush ? boundaries_flush : boundaries);
        if (ret) {
            error_setg(errp, "Device '%s': failed to set flush boundaries", id);
            return;
        }
    }
}