/*
 * Block layer qmp and info dump related functions
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/qapi.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qemu/qemu-print.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"

BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
                                        BlockDriverState *bs,
                                        bool flat,
                                        Error **errp)
{
    ImageInfo **p_image_info;
    BlockDriverState *bs0;
    BlockDeviceInfo *info;

    if (!bs->drv) {
        error_setg(errp, "Block device %s is ejected", bs->node_name);
        return NULL;
    }

    bdrv_refresh_filename(bs);

    info = g_malloc0(sizeof(*info));
    info->file = g_strdup(bs->filename);
    info->ro = bs->read_only;
    info->drv = g_strdup(bs->drv->format_name);
    info->encrypted = bs->encrypted;
    info->encryption_key_missing = false;

    info->cache = g_new(BlockdevCacheInfo, 1);
    *info->cache = (BlockdevCacheInfo) {
        .writeback = blk ? blk_enable_write_cache(blk) : true,
        .direct    = !!(bs->open_flags & BDRV_O_NOCACHE),
        .no_flush  = !!(bs->open_flags & BDRV_O_NO_FLUSH),
    };

    if (bs->node_name[0]) {
        info->has_node_name = true;
        info->node_name = g_strdup(bs->node_name);
    }

    if (bs->backing_file[0]) {
        info->has_backing_file = true;
        info->backing_file = g_strdup(bs->backing_file);
    }

    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
        ThrottleConfig cfg;
        BlockBackendPublic *blkp = blk_get_public(blk);

        throttle_group_get_config(&blkp->throttle_group_member, &cfg);

        info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
        info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
        info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;

        info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
        info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
        info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;

        info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
        info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;

        info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
        info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;

        info->has_bps_max_length = info->has_bps_max;
        info->bps_max_length =
            cfg.buckets[THROTTLE_BPS_TOTAL].burst_length;
        info->has_bps_rd_max_length = info->has_bps_rd_max;
        info->bps_rd_max_length =
            cfg.buckets[THROTTLE_BPS_READ].burst_length;
        info->has_bps_wr_max_length = info->has_bps_wr_max;
        info->bps_wr_max_length =
            cfg.buckets[THROTTLE_BPS_WRITE].burst_length;

        info->has_iops_max_length = info->has_iops_max;
        info->iops_max_length =
            cfg.buckets[THROTTLE_OPS_TOTAL].burst_length;
        info->has_iops_rd_max_length = info->has_iops_rd_max;
        info->iops_rd_max_length =
            cfg.buckets[THROTTLE_OPS_READ].burst_length;
        info->has_iops_wr_max_length = info->has_iops_wr_max;
        info->iops_wr_max_length =
            cfg.buckets[THROTTLE_OPS_WRITE].burst_length;

        info->has_iops_size = cfg.op_size;
        info->iops_size = cfg.op_size;

        info->has_group = true;
        info->group =
            g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
    }

    info->write_threshold = bdrv_write_threshold_get(bs);

    bs0 = bs;
    p_image_info = &info->image;
    info->backing_file_depth = 0;
    while (1) {
        Error *local_err = NULL;
        bdrv_query_image_info(bs0, p_image_info, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            qapi_free_BlockDeviceInfo(info);
            return NULL;
        }

        /* stop gathering data for flat output */
        if (flat) {
            break;
        }

        if (bs0->drv && bs0->backing) {
            info->backing_file_depth++;
            bs0 = bs0->backing->bs;
            (*p_image_info)->has_backing_image = true;
            p_image_info = &((*p_image_info)->backing_image);
        } else {
            break;
        }

        /* Skip automatically inserted nodes that the user isn't aware of for
         * query-block (blk != NULL), but not for query-named-block-nodes */
        while (blk && bs0->drv && bs0->implicit) {
            bs0 = backing_bs(bs0);
            assert(bs0);
        }
    }

    return info;
}
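
/*
 * Example (illustrative sketch only): how a caller might build a flat
 * BlockDeviceInfo for a backend's root node and release it again.  The
 * surrounding context (a valid BlockBackend and an Error pointer) is
 * assumed and not part of this sketch.
 *
 *     Error *local_err = NULL;
 *     BlockDeviceInfo *dev_info;
 *
 *     dev_info = bdrv_block_device_info(blk, blk_bs(blk), true, &local_err);
 *     if (!dev_info) {
 *         error_report_err(local_err);
 *     } else {
 *         ... inspect dev_info->image, dev_info->cache, ...
 *         qapi_free_BlockDeviceInfo(dev_info);
 *     }
 */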

/*
 * Returns 0 on success, with *p_list either set to describe snapshot
 * information, or NULL because there are no snapshots.  Returns -errno on
 * error, with *p_list untouched.
 */
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
                                  SnapshotInfoList **p_list,
                                  Error **errp)
{
    int i, sn_count;
    QEMUSnapshotInfo *sn_tab = NULL;
    SnapshotInfoList *info_list, *cur_item = NULL, *head = NULL;
    SnapshotInfo *info;

    sn_count = bdrv_snapshot_list(bs, &sn_tab);
    if (sn_count < 0) {
        const char *dev = bdrv_get_device_name(bs);
        switch (sn_count) {
        case -ENOMEDIUM:
            error_setg(errp, "Device '%s' is not inserted", dev);
            break;
        case -ENOTSUP:
            error_setg(errp,
                       "Device '%s' does not support internal snapshots",
                       dev);
            break;
        default:
            error_setg_errno(errp, -sn_count,
                             "Can't list snapshots of device '%s'", dev);
            break;
        }
        return sn_count;
    }

    for (i = 0; i < sn_count; i++) {
        info = g_new0(SnapshotInfo, 1);
        info->id = g_strdup(sn_tab[i].id_str);
        info->name = g_strdup(sn_tab[i].name);
        info->vm_state_size = sn_tab[i].vm_state_size;
        info->date_sec = sn_tab[i].date_sec;
        info->date_nsec = sn_tab[i].date_nsec;
        info->vm_clock_sec = sn_tab[i].vm_clock_nsec / 1000000000;
        info->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;

        info_list = g_new0(SnapshotInfoList, 1);
        info_list->value = info;

        /* XXX: waiting for the qapi to support qemu-queue.h types */
        if (!cur_item) {
            head = cur_item = info_list;
        } else {
            cur_item->next = info_list;
            cur_item = info_list;
        }
    }

    g_free(sn_tab);
    *p_list = head;
    return 0;
}
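
/*
 * Example (illustrative sketch): consuming the contract documented above.
 * A return of 0 with *p_list == NULL simply means "no snapshots"; only a
 * negative return value indicates an error.  The BlockDriverState and the
 * surrounding error handling are assumed.
 *
 *     SnapshotInfoList *list = NULL, *elem;
 *     Error *local_err = NULL;
 *
 *     if (bdrv_query_snapshot_info_list(bs, &list, &local_err) < 0) {
 *         error_report_err(local_err);
 *     } else {
 *         for (elem = list; elem; elem = elem->next) {
 *             ... use elem->value->id, elem->value->name, ...
 *         }
 *         qapi_free_SnapshotInfoList(list);
 *     }
 */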
262 */ 263 void bdrv_query_image_info(BlockDriverState *bs, 264 ImageInfo **p_info, 265 Error **errp) 266 { 267 int64_t size; 268 const char *backing_filename; 269 BlockDriverInfo bdi; 270 int ret; 271 Error *err = NULL; 272 ImageInfo *info; 273 274 aio_context_acquire(bdrv_get_aio_context(bs)); 275 276 size = bdrv_getlength(bs); 277 if (size < 0) { 278 error_setg_errno(errp, -size, "Can't get image size '%s'", 279 bs->exact_filename); 280 goto out; 281 } 282 283 bdrv_refresh_filename(bs); 284 285 info = g_new0(ImageInfo, 1); 286 info->filename = g_strdup(bs->filename); 287 info->format = g_strdup(bdrv_get_format_name(bs)); 288 info->virtual_size = size; 289 info->actual_size = bdrv_get_allocated_file_size(bs); 290 info->has_actual_size = info->actual_size >= 0; 291 if (bdrv_is_encrypted(bs)) { 292 info->encrypted = true; 293 info->has_encrypted = true; 294 } 295 if (bdrv_get_info(bs, &bdi) >= 0) { 296 if (bdi.cluster_size != 0) { 297 info->cluster_size = bdi.cluster_size; 298 info->has_cluster_size = true; 299 } 300 info->dirty_flag = bdi.is_dirty; 301 info->has_dirty_flag = true; 302 } 303 info->format_specific = bdrv_get_specific_info(bs, &err); 304 if (err) { 305 error_propagate(errp, err); 306 qapi_free_ImageInfo(info); 307 goto out; 308 } 309 info->has_format_specific = info->format_specific != NULL; 310 311 backing_filename = bs->backing_file; 312 if (backing_filename[0] != '\0') { 313 char *backing_filename2; 314 info->backing_filename = g_strdup(backing_filename); 315 info->has_backing_filename = true; 316 backing_filename2 = bdrv_get_full_backing_filename(bs, NULL); 317 318 /* Always report the full_backing_filename if present, even if it's the 319 * same as backing_filename. That they are same is useful info. */ 320 if (backing_filename2) { 321 info->full_backing_filename = g_strdup(backing_filename2); 322 info->has_full_backing_filename = true; 323 } 324 325 if (bs->backing_format[0]) { 326 info->backing_filename_format = g_strdup(bs->backing_format); 327 info->has_backing_filename_format = true; 328 } 329 g_free(backing_filename2); 330 } 331 332 ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err); 333 switch (ret) { 334 case 0: 335 if (info->snapshots) { 336 info->has_snapshots = true; 337 } 338 break; 339 /* recoverable error */ 340 case -ENOMEDIUM: 341 case -ENOTSUP: 342 error_free(err); 343 break; 344 default: 345 error_propagate(errp, err); 346 qapi_free_ImageInfo(info); 347 goto out; 348 } 349 350 *p_info = info; 351 352 out: 353 aio_context_release(bdrv_get_aio_context(bs)); 354 } 355 356 /* @p_info will be set only on success. 

/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
                            Error **errp)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    BlockDriverState *bs = blk_bs(blk);
    char *qdev;

    /* Skip automatically inserted nodes that the user isn't aware of */
    while (bs && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
    }

    info->device = g_strdup(blk_name(blk));
    info->type = g_strdup("unknown");
    info->locked = blk_dev_is_medium_locked(blk);
    info->removable = blk_dev_has_removable_media(blk);

    qdev = blk_get_attached_dev_id(blk);
    if (qdev && *qdev) {
        info->has_qdev = true;
        info->qdev = qdev;
    } else {
        g_free(qdev);
    }

    if (blk_dev_has_tray(blk)) {
        info->has_tray_open = true;
        info->tray_open = blk_dev_is_tray_open(blk);
    }

    if (blk_iostatus_is_enabled(blk)) {
        info->has_io_status = true;
        info->io_status = blk_iostatus(blk);
    }

    if (bs && !QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    if (bs && bs->drv) {
        info->has_inserted = true;
        info->inserted = bdrv_block_device_info(blk, bs, false, errp);
        if (info->inserted == NULL) {
            goto err;
        }
    }

    *p_info = info;
    return;

err:
    qapi_free_BlockInfo(info);
}

static uint64List *uint64_list(uint64_t *list, int size)
{
    int i;
    uint64List *out_list = NULL;
    uint64List **pout_list = &out_list;

    for (i = 0; i < size; i++) {
        uint64List *entry = g_new(uint64List, 1);
        entry->value = list[i];
        *pout_list = entry;
        pout_list = &entry->next;
    }

    *pout_list = NULL;

    return out_list;
}

static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist,
                                         bool *not_null,
                                         BlockLatencyHistogramInfo **info)
{
    *not_null = hist->bins != NULL;
    if (*not_null) {
        *info = g_new0(BlockLatencyHistogramInfo, 1);

        (*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
        (*info)->bins = uint64_list(hist->bins, hist->nbins);
    }
}
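
/*
 * Illustration of the histogram layout produced above: with N bins there are
 * N - 1 boundaries, so e.g. boundaries [10, 50, 100] describe the four
 * intervals [0, 10), [10, 50), [50, 100) and [100, +inf), and bins[i] counts
 * the requests whose latency fell into interval i.  The concrete numbers are
 * only an example.
 */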

static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
    ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
    ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations = stats->invalid_ops[BLOCK_ACCT_FLUSH];
    ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
    ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStatsList *timed_stats =
            g_malloc0(sizeof(*timed_stats));
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));
        timed_stats->next = ds->timed_stats;
        timed_stats->value = dev_stats;
        ds->timed_stats = timed_stats;

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);
    }

    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ],
                                 &ds->has_rd_latency_histogram,
                                 &ds->rd_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE],
                                 &ds->has_wr_latency_histogram,
                                 &ds->wr_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH],
                                 &ds->has_flush_latency_histogram,
                                 &ds->flush_latency_histogram);
}

static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
                                        bool blk_level)
{
    BlockStats *s = NULL;

    s = g_malloc0(sizeof(*s));
    s->stats = g_malloc0(sizeof(*s->stats));

    if (!bs) {
        return s;
    }

    /* Skip automatically inserted nodes that the user isn't aware of in
     * a BlockBackend-level command. Stay at the exact node for a node-level
     * command. */
    while (blk_level && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
        assert(bs);
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->has_node_name = true;
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);

    s->driver_specific = bdrv_get_specific_stats(bs);
    if (s->driver_specific) {
        s->has_driver_specific = true;
    }

    if (bs->file) {
        s->has_parent = true;
        s->parent = bdrv_query_bds_stats(bs->file->bs, blk_level);
    }

    if (blk_level && bs->backing) {
        s->has_backing = true;
        s->backing = bdrv_query_bds_stats(bs->backing->bs, blk_level);
    }

    return s;
}
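
/*
 * Sketch of the BlockStats nesting built by the recursion above (the node
 * names are made up):
 *
 *     { node-name: "fmt",  stats: ... }        <- queried node
 *       parent:  { node-name: "file", ... }    <- from bs->file
 *       backing: { node-name: "base", ... }    <- from bs->backing, only for
 *                                                 BlockBackend-level queries
 */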

BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        BlockInfoList *info;

        if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
            continue;
        }

        info = g_malloc0(sizeof(*info));
        bdrv_query_info(blk, &info->value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            g_free(info);
            qapi_free_BlockInfoList(head);
            return NULL;
        }

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* Just to be safe if query_nodes is not always initialized */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            BlockStatsList *info = g_malloc0(sizeof(*info));
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            info->value = bdrv_query_bds_stats(bs, false);
            aio_context_release(ctx);

            *p_next = info;
            p_next = &info->next;
        }
    } else {
        for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
            BlockStatsList *info;
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;
            char *qdev;

            if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
                continue;
            }

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->has_device = true;
            s->device = g_strdup(blk_name(blk));

            qdev = blk_get_attached_dev_id(blk);
            if (qdev && *qdev) {
                s->has_qdev = true;
                s->qdev = qdev;
            } else {
                g_free(qdev);
            }

            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            info = g_malloc0(sizeof(*info));
            info->value = s;
            *p_next = info;
            p_next = &info->next;
        }
    }

    return head;
}
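
/*
 * Example QMP usage of the two queries above (response bodies abbreviated,
 * device and node names illustrative only):
 *
 *     -> { "execute": "query-block" }
 *     <- { "return": [ { "device": "ide0-hd0", "inserted": { ... } } ] }
 *
 *     -> { "execute": "query-blockstats",
 *          "arguments": { "query-nodes": true } }
 *     <- { "return": [ { "node-name": "fmt", "stats": { ... } } ] }
 */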

void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
{
    char date_buf[128], clock_buf[128];
    struct tm tm;
    time_t ti;
    int64_t secs;
    char *sizing = NULL;

    if (!sn) {
        qemu_printf("%-10s%-20s%11s%20s%15s",
                    "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
    } else {
        ti = sn->date_sec;
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        sizing = size_to_str(sn->vm_state_size);
        qemu_printf("%-10s%-20s%11s%20s%15s",
                    sn->id_str, sn->name,
                    sizing,
                    date_buf,
                    clock_buf);
    }
    g_free(sizing);
}

static void dump_qdict(int indentation, QDict *dict);
static void dump_qlist(int indentation, QList *list);

static void dump_qobject(int comp_indent, QObject *obj)
{
    switch (qobject_type(obj)) {
    case QTYPE_QNUM: {
        QNum *value = qobject_to(QNum, obj);
        char *tmp = qnum_to_string(value);
        qemu_printf("%s", tmp);
        g_free(tmp);
        break;
    }
    case QTYPE_QSTRING: {
        QString *value = qobject_to(QString, obj);
        qemu_printf("%s", qstring_get_str(value));
        break;
    }
    case QTYPE_QDICT: {
        QDict *value = qobject_to(QDict, obj);
        dump_qdict(comp_indent, value);
        break;
    }
    case QTYPE_QLIST: {
        QList *value = qobject_to(QList, obj);
        dump_qlist(comp_indent, value);
        break;
    }
    case QTYPE_QBOOL: {
        QBool *value = qobject_to(QBool, obj);
        qemu_printf("%s", qbool_get_bool(value) ? "true" : "false");
        break;
    }
    default:
        abort();
    }
}

static void dump_qlist(int indentation, QList *list)
{
    const QListEntry *entry;
    int i = 0;

    for (entry = qlist_first(list); entry; entry = qlist_next(entry), i++) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        qemu_printf("%*s[%i]:%c", indentation * 4, "", i,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
    }
}

static void dump_qdict(int indentation, QDict *dict)
{
    const QDictEntry *entry;

    for (entry = qdict_first(dict); entry; entry = qdict_next(dict, entry)) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        char *key = g_malloc(strlen(entry->key) + 1);
        int i;

        /* replace dashes with spaces in key (variable) names */
        for (i = 0; entry->key[i]; i++) {
            key[i] = entry->key[i] == '-' ? ' ' : entry->key[i];
        }
        key[i] = 0;
        qemu_printf("%*s%s:%c", indentation * 4, "", key,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
        g_free(key);
    }
}
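
/*
 * Illustration of the rendering produced by dump_qdict()/dump_qlist():
 * nesting is indented by four spaces per level, dashes in key names become
 * spaces, and list entries are prefixed with their index.  A hypothetical
 * dict { "refcount-bits": 16, "snapshots": [ { "id": "1" } ] } dumped at
 * indentation level 1 would print roughly:
 *
 *     refcount bits: 16
 *     snapshots:
 *         [0]:
 *             id: 1
 */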

void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec)
{
    QObject *obj, *data;
    Visitor *v = qobject_output_visitor_new(&obj);

    visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort);
    visit_complete(v, &obj);
    data = qdict_get(qobject_to(QDict, obj), "data");
    dump_qobject(1, data);
    qobject_unref(obj);
    visit_free(v);
}

void bdrv_image_info_dump(ImageInfo *info)
{
    char *size_buf, *dsize_buf;
    if (!info->has_actual_size) {
        dsize_buf = g_strdup("unavailable");
    } else {
        dsize_buf = size_to_str(info->actual_size);
    }
    size_buf = size_to_str(info->virtual_size);
    qemu_printf("image: %s\n"
                "file format: %s\n"
                "virtual size: %s (%" PRId64 " bytes)\n"
                "disk size: %s\n",
                info->filename, info->format, size_buf,
                info->virtual_size,
                dsize_buf);
    g_free(size_buf);
    g_free(dsize_buf);

    if (info->has_encrypted && info->encrypted) {
        qemu_printf("encrypted: yes\n");
    }

    if (info->has_cluster_size) {
        qemu_printf("cluster_size: %" PRId64 "\n",
                    info->cluster_size);
    }

    if (info->has_dirty_flag && info->dirty_flag) {
        qemu_printf("cleanly shut down: no\n");
    }

    if (info->has_backing_filename) {
        qemu_printf("backing file: %s", info->backing_filename);
        if (!info->has_full_backing_filename) {
            qemu_printf(" (cannot determine actual path)");
        } else if (strcmp(info->backing_filename,
                          info->full_backing_filename) != 0) {
            qemu_printf(" (actual path: %s)", info->full_backing_filename);
        }
        qemu_printf("\n");
        if (info->has_backing_filename_format) {
            qemu_printf("backing file format: %s\n",
                        info->backing_filename_format);
        }
    }

    if (info->has_snapshots) {
        SnapshotInfoList *elem;

        qemu_printf("Snapshot list:\n");
        bdrv_snapshot_dump(NULL);
        qemu_printf("\n");

        /* Ideally bdrv_snapshot_dump() would operate on SnapshotInfoList but
         * we convert to the block layer's native QEMUSnapshotInfo for now.
         */
        for (elem = info->snapshots; elem; elem = elem->next) {
            QEMUSnapshotInfo sn = {
                .vm_state_size = elem->value->vm_state_size,
                .date_sec = elem->value->date_sec,
                .date_nsec = elem->value->date_nsec,
                .vm_clock_nsec = elem->value->vm_clock_sec * 1000000000ULL +
                                 elem->value->vm_clock_nsec,
            };

            pstrcpy(sn.id_str, sizeof(sn.id_str), elem->value->id);
            pstrcpy(sn.name, sizeof(sn.name), elem->value->name);
            bdrv_snapshot_dump(&sn);
            qemu_printf("\n");
        }
    }

    if (info->has_format_specific) {
        qemu_printf("Format specific information:\n");
        bdrv_image_info_specific_dump(info->format_specific);
    }
}
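
/*
 * Illustration of the output produced by bdrv_image_info_dump() for a plain
 * qcow2 image (the concrete values and their formatting are made up for the
 * example):
 *
 *     image: disk.qcow2
 *     file format: qcow2
 *     virtual size: 10 GiB (10737418240 bytes)
 *     disk size: 1.5 MiB
 *     cluster_size: 65536
 */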