/*
 * Dirty rate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "exec/ramblock.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"

/*
 * total_dirty_pages is protected by the BQL and is used
 * to record dirty pages between two calls of
 * memory_global_dirty_log_sync().
 */
uint64_t total_dirty_pages;

typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                      int64_t calc_time_ms)
{
    uint64_t memory_size_MB;
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;

    return memory_size_MB * 1000 / calc_time_ms;
}
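
/*
 * Worked example for do_calculate_dirtyrate() above (illustrative
 * numbers, assuming a 4 KiB TARGET_PAGE_SIZE): 51200 pages dirtied
 * during a 1000 ms window amount to (51200 * 4096) >> 20 = 200 MB,
 * giving 200 MB * 1000 / 1000 ms = 200 MB/s.
 */
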
void global_dirty_log_change(unsigned int flag, bool start)
{
    qemu_mutex_lock_iothread();
    if (start) {
        memory_global_dirty_log_start(flag);
    } else {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

/*
 * global_dirty_log_sync
 * 1. sync the dirty log from KVM
 * 2. stop dirty tracking if needed.
 */
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_sync();
    if (one_shot) {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
{
    CPUState *cpu;
    int nvcpu = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    stat->nvcpu = nvcpu;
    stat->rates = g_new0(DirtyRateVcpu, nvcpu);

    return g_new0(DirtyPageRecord, nvcpu);
}

static void vcpu_dirty_stat_collect(VcpuStat *stat,
                                    DirtyPageRecord *records,
                                    bool start)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        record_dirtypages(records, cpu, start);
    }
}

int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                 VcpuStat *stat,
                                 unsigned int flag,
                                 bool one_shot)
{
    DirtyPageRecord *records;
    int64_t init_time_ms;
    int64_t duration;
    int64_t dirtyrate;
    int i = 0;
    unsigned int gen_id;

retry:
    init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        gen_id = cpu_list_generation_id_get();
        records = vcpu_dirty_stat_alloc(stat);
        vcpu_dirty_stat_collect(stat, records, true);
    }

    duration = dirty_stat_wait(calc_time_ms, init_time_ms);

    global_dirty_log_sync(flag, one_shot);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        if (gen_id != cpu_list_generation_id_get()) {
            g_free(records);
            g_free(stat->rates);
            /*
             * The lock guard drops qemu_cpu_list_lock by itself when
             * the goto leaves its scope, so no explicit unlock here.
             */
            goto retry;
        }
        vcpu_dirty_stat_collect(stat, records, false);
    }

    for (i = 0; i < stat->nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate(records[i], duration);

        stat->rates[i].id = i;
        stat->rates[i].dirty_rate = dirtyrate;

        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
    }

    g_free(records);

    return duration;
}

static bool is_sample_period_valid(int64_t sec)
{
    return sec >= MIN_FETCH_DIRTYRATE_TIME_SEC &&
           sec <= MAX_FETCH_DIRTYRATE_TIME_SEC;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}
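
/*
 * Illustrative QMP exchange for a finished dirty-ring measurement
 * (the values are made up; query_dirty_rate_info() below fills in
 * the real ones):
 *   -> { "execute": "query-dirty-rate" }
 *   <- { "return": { "status": "measured", "dirty-rate": 108,
 *                    "start-time": 1674983086, "calc-time": 1,
 *                    "sample-pages": 0, "mode": "dirty-ring",
 *                    "vcpu-dirty-rate": [ { "id": 0, "dirty-rate": 108 } ] } }
 */
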
static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * Set sample_pages to 0 to indicate that page sampling
             * isn't enabled.
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time,
                                struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = config.sample_period_seconds;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* the last calc-dirty-rate QMP command used dirty ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        g_free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
                                                   TARGET_PAGE_SIZE) >> 20;
}

static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}
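
/*
 * Sketch of the estimate behind update_dirtyrate(): the sampled dirty
 * fraction approximates the dirty fraction of all sampled blocks, so
 *
 *   rate (MB/s) ~= (total_dirty_samples / total_sample_count)
 *                  * total_block_mem_MB / (msec / 1000)
 *
 * which rearranges to the integer expression above.
 */
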
/*
 * Get the hash result for the sampled memory of TARGET_PAGE_SIZE bytes
 * in the ramblock, where vfn is the virtual frame number relative to
 * the ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /* ramblock size less than one page, return success to skip this ramblock */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}
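
/*
 * Summary of the page-sampling flow built from the helpers above and
 * below:
 *   1. record_ramblock_hash_info() picks random pages in each migratable
 *      ramblock and records a crc32 hash per sampled page;
 *   2. the calculation thread sleeps for the requested period;
 *   3. compare_page_hash_info() re-hashes the same pages; a changed
 *      hash means the page was written in the meantime.
 */
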
static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE, which is
     * given in KiB (hence the shift by 10 to get bytes).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    return &infos[i];
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}
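
/*
 * Illustrative QMP request for the dirty-bitmap mode implemented below
 * (it is only accepted while the KVM dirty ring is disabled):
 *   -> { "execute": "calc-dirty-rate",
 *        "arguments": { "calc-time": 1, "mode": "dirty-bitmap" } }
 *   <- { "return": {} }
 */
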
static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t msec = 0;
    int64_t start_time;
    DirtyPageRecord dirty_pages;

    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

    /*
     * The first round of log sync may return all 1 bits with
     * KVM_DIRTY_LOG_INITIALLY_SET enabled; skip it unconditionally
     * and start dirty tracking from the second round of log sync.
     */
    memory_global_dirty_log_sync();

    /*
     * Reset page protection manually and unconditionally.
     * This makes sure the KVM dirty log is cleared if the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
     */
    dirtyrate_manual_reset_protect();
    qemu_mutex_unlock_iothread();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    /*
     * Do two things:
     * 1. fetch the dirty bitmap from KVM
     * 2. stop dirty tracking
     */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);

    record_dirtypages_bitmap(&dirty_pages, false);

    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec);
}

static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    int64_t duration;
    uint64_t dirtyrate_sum = 0;
    int i = 0;

    /* start log sync */
    global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);

    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;

    /* calculate the per-vCPU dirty rates */
    duration = vcpu_calculate_dirtyrate(config.sample_period_seconds * 1000,
                                        &DirtyStat.dirty_ring,
                                        GLOBAL_DIRTY_DIRTY_RATE,
                                        true);

    DirtyStat.calc_time = duration / 1000;

    /* the VM dirty rate is the sum of the per-vCPU dirty rates */
    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate_sum += DirtyStat.dirty_ring.rates[i].dirty_rate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
}

static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}
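
/*
 * The measurement thread below moves CalculatingState through
 * UNSTARTED -> MEASURING -> MEASURED. dirtyrate_set_state() uses a
 * compare-and-swap, so an unexpected current state makes the
 * transition fail loudly instead of being silently overwritten.
 */
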
void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;
    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        rcu_unregister_thread();
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}
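
/*
 * Illustrative QMP request for the default page-sampling mode
 * (sample-pages is the number of sampled pages per GB of ramblock):
 *   -> { "execute": "calc-dirty-rate",
 *        "arguments": { "calc-time": 1, "sample-pages": 512 } }
 *   <- { "return": {} }
 */
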
void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;
    int64_t start_time;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range [%d, %d].",
                   MIN_FETCH_DIRTYRATE_TIME_SEC,
                   MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        error_setg(errp, "sample-pages is used only in page-sampling mode");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range [%d, %d].",
                       MIN_SAMPLE_PAGE_COUNT,
                       MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * Dirty ring mode only works when the KVM dirty ring is enabled;
     * dirty bitmap mode, on the contrary, works only when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
        !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                         DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * Update the dirty rate mode so that we can figure out what mode
     * was used in the last calculation.
     */
    dirtyrate_mode = mode;

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    init_dirtyrate_stat(start_time, config);

    /*
     * config is static, so the pointer stays valid for the detached
     * thread after this function returns.
     */
    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}

void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info();

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                       info->sample_pages);
    }
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Only one of dirty ring and dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, true,
                        mode, &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}