/*
 * Dirty rate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include <zlib.h>
#include "hw/core/cpu.h"
#include "qapi/error.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"
#include "qemu/xxhash.h"

/*
 * total_dirty_pages is protected by the BQL and is used
 * to count dirty pages between two calls of
 * memory_global_dirty_log_sync().
 */
uint64_t total_dirty_pages;

typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

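/*
 * Wait until at least @msec milliseconds have elapsed since @initial_time
 * and return the time that actually passed.  The return value can exceed
 * @msec because g_usleep() may overshoot.
 */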
static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
        /* g_usleep may overshoot */
        msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
    }

    return msec;
}

static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

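/*
 * Convert the number of pages dirtied over @calc_time_ms into a rate in
 * MiB/s.  Illustrative example (assuming a 4 KiB target page size):
 * 51200 pages dirtied in 1000 ms ->
 * qemu_target_pages_to_MiB(51200 * 1000) / 1000 = 200 MiB/s.
 */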
static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                      int64_t calc_time_ms)
{
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    /*
     * multiply by 1000ms/s _before_ converting down to MiB
     * to avoid losing precision
     */
    return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
        calc_time_ms;
}

void global_dirty_log_change(unsigned int flag, bool start)
{
    qemu_mutex_lock_iothread();
    if (start) {
        memory_global_dirty_log_start(flag);
    } else {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

/*
 * global_dirty_log_sync
 * 1. sync dirty log from kvm
 * 2. stop dirty tracking if needed.
 */
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_sync(false);
    if (one_shot) {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
{
    CPUState *cpu;
    int nvcpu = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    stat->nvcpu = nvcpu;
    stat->rates = g_new0(DirtyRateVcpu, nvcpu);

    return g_new0(DirtyPageRecord, nvcpu);
}

static void vcpu_dirty_stat_collect(VcpuStat *stat,
                                    DirtyPageRecord *records,
                                    bool start)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        record_dirtypages(records, cpu, start);
    }
}

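/*
 * Measure the per-vCPU dirty page rate over @calc_time_ms by snapshotting
 * each vCPU's dirty_pages counter, sleeping for the calculation period,
 * syncing the dirty log and snapshotting the counters again.  If the vCPU
 * list changes in the meantime (detected via the cpu list generation id,
 * e.g. on hotplug), the measurement is restarted.  Returns the actual
 * measurement duration in milliseconds.
 */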
int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                 VcpuStat *stat,
                                 unsigned int flag,
                                 bool one_shot)
{
    DirtyPageRecord *records;
    int64_t init_time_ms;
    int64_t duration;
    int64_t dirtyrate;
    int i = 0;
    unsigned int gen_id;

retry:
    init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        gen_id = cpu_list_generation_id_get();
        records = vcpu_dirty_stat_alloc(stat);
        vcpu_dirty_stat_collect(stat, records, true);
    }

    duration = dirty_stat_wait(calc_time_ms, init_time_ms);

    global_dirty_log_sync(flag, one_shot);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        if (gen_id != cpu_list_generation_id_get()) {
            g_free(records);
            g_free(stat->rates);
            cpu_list_unlock();
            goto retry;
        }
        vcpu_dirty_stat_collect(stat, records, false);
    }

    for (i = 0; i < stat->nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate(records[i], duration);

        stat->rates[i].id = i;
        stat->rates[i].dirty_rate = dirtyrate;

        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
    }

    g_free(records);

    return duration;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

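/*
 * Build the DirtyRateInfo QAPI object returned by query-dirty-rate.
 * Per-vCPU rates are only filled in for dirty-ring mode, and
 * sample_pages is reported as 0 for the modes that do not use page
 * sampling.
 */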
static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * Set sample_pages to 0 to indicate that page sampling
             * isn't enabled.
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time,
                                struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = config.sample_period_seconds;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* free the vcpu rates if the last calc-dirty-rate used dirty-ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        g_free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MiB */
    DirtyStat.page_sampling.total_block_mem_MB +=
        qemu_target_pages_to_MiB(info->ramblock_pages);
}

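/*
 * Derive the VM dirty rate from the sampling statistics:
 *
 *   dirty_rate = total_dirty_samples * total_block_mem_MB * 1000
 *                / (total_sample_count * msec)
 *
 * i.e. the fraction of sampled pages that changed, scaled to the total
 * sampled memory and normalized to one second.  Illustrative example:
 * 128 of 1024 samples changed over 4096 MiB in 1000 ms ->
 * 128 * 4096 * 1000 / (1024 * 1000) = 512 MiB/s.
 */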
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Compute hash of a single page of size TARGET_PAGE_SIZE.
 */
static uint32_t compute_page_hash(void *ptr)
{
    size_t page_size = qemu_target_page_size();
    uint32_t i;
    uint64_t v1, v2, v3, v4;
    uint64_t res;
    const uint64_t *p = ptr;

    v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
    v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
    v3 = QEMU_XXHASH_SEED + 0;
    v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
    for (i = 0; i < page_size / 8; i += 4) {
        v1 = XXH64_round(v1, p[i + 0]);
        v2 = XXH64_round(v2, p[i + 1]);
        v3 = XXH64_round(v3, p[i + 2]);
        v4 = XXH64_round(v4, p[i + 3]);
    }
    res = XXH64_mergerounds(v1, v2, v3, v4);
    res += page_size;
    res = XXH64_avalanche(res);
    return (uint32_t)(res & UINT32_MAX);
}


/*
 * Get the hash of one sampled page of length TARGET_PAGE_SIZE in the
 * ramblock, identified by its virtual frame number (vfn) relative to the
 * ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t hash;

    hash = compute_page_hash(info->ramblock_addr +
                             vfn * qemu_target_page_size());

    trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
    return hash;
}

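/*
 * Record the initial state of one ramblock for page sampling: pick
 * sample_pages_count random virtual frame numbers within the block and
 * store the hash of each sampled page so it can be compared against a
 * second hash taken after the calculation period.
 */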
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock is smaller than one page or there is nothing to
     * sample, return success so that this ramblock is simply skipped.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

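/*
 * Fill in the per-ramblock sampling parameters.  Illustrative example
 * (assuming a 4 KiB target page size and the default of 512 samples per
 * GiB): a 4 GiB ramblock yields sample_pages_count = 2048 out of
 * ramblock_pages = 1048576 pages.
 */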
static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GiB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           qemu_target_page_bits();
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks of at least MIN_RAMBLOCK_SIZE (given in KiB).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t hash;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (hash != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    return &infos[i];
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}

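/*
 * Dirty-bitmap mode: enable global dirty logging and compute the rate
 * from the growth of total_dirty_pages between the start of the
 * calculation period and the final dirty log sync.
 */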
static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t msec = 0;
    int64_t start_time;
    DirtyPageRecord dirty_pages;

    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

    /*
     * The first round of log sync may return all-1 bits when
     * KVM_DIRTY_LOG_INITIALLY_SET is enabled, so skip it unconditionally
     * and start dirty tracking from the second round of log sync.
     */
    memory_global_dirty_log_sync(false);

    /*
     * Reset page protection manually and unconditionally.  This makes
     * sure the KVM dirty log is cleared if the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE capability is enabled.
     */
    dirtyrate_manual_reset_protect();
    qemu_mutex_unlock_iothread();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    /*
     * Do two things:
     * 1. fetch the dirty bitmap from KVM
     * 2. stop dirty tracking
     */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);

    record_dirtypages_bitmap(&dirty_pages, false);

    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec);
}

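/*
 * Dirty-ring mode: measure a per-vCPU dirty rate from each vCPU's
 * dirty_pages counter and report their sum as the VM dirty rate.
 */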
static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    int64_t duration;
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    int i = 0;

    /* start global dirty logging */
    global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);

    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;

    /* calculate vcpu dirtyrate */
    duration = vcpu_calculate_dirtyrate(config.sample_period_seconds * 1000,
                                        &DirtyStat.dirty_ring,
                                        GLOBAL_DIRTY_DIRTY_RATE,
                                        true);

    DirtyStat.calc_time = duration / 1000;

    /* calculate vm dirtyrate */
    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
        DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
}

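/*
 * Page-sampling mode: hash randomly sampled pages of every migratable
 * ramblock, wait for the calculation period, re-hash the same pages and
 * treat every changed hash as a dirtied page.
 */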
static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;
    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}

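/*
 * QMP entry point for calc-dirty-rate.  A typical invocation (values are
 * illustrative only) looks like:
 *
 *   { "execute": "calc-dirty-rate",
 *     "arguments": { "calc-time": 1, "mode": "dirty-ring" } }
 *
 * followed by query-dirty-rate once the status is "measured".
 */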
void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;
    int64_t start_time;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range[%d, %d].",
                         MIN_FETCH_DIRTYRATE_TIME_SEC,
                         MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        error_setg(errp, "sample-pages is used only in page-sampling mode");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                            MIN_SAMPLE_PAGE_COUNT,
                            MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * Dirty-ring mode only works when the KVM dirty ring is enabled,
     * whereas dirty-bitmap mode only works when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
        !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                         DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * Update the dirty rate mode so that we can figure out what mode was
     * used in the last calculation.
     */
    dirtyrate_mode = mode;

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    init_dirtyrate_stat(start_time, config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}

void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info();

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                       info->sample_pages);
    }
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

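/*
 * HMP wrapper around calc-dirty-rate.  An illustrative invocation is
 * "calc_dirty_rate 10", which measures for 10 seconds in page-sampling
 * mode; the dirty_ring and dirty_bitmap booleans (exposed as command
 * flags in hmp-commands.hx) select the other modes.  Results are
 * retrieved with "info dirty_rate".
 */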
void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Only one of dirty ring or dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, true,
                        mode, &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}
887