/*
 * Dirty rate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "exec/ramblock.h"
#include "qemu/rcu_queue.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;

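/*
 * Wait until the requested sample period has elapsed, sleeping if
 * needed, and return the period's actual length in ms, which can
 * exceed @msec when the preceding record phase already overran it.
 */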
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

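/*
 * Atomically move the calculation state from @old_state to @new_state.
 * Returns 0 on success, or -1 if another thread won the race and the
 * state was no longer @old_state.
 */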
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo));

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;
    }

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;
    info->sample_pages = DirtyStat.sample_pages;

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time,
                                uint64_t sample_pages)
{
    DirtyStat.total_dirty_samples = 0;
    DirtyStat.total_sample_count = 0;
    DirtyStat.total_block_mem_MB = 0;
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = calc_time;
    DirtyStat.sample_pages = sample_pages;
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.total_block_mem_MB += (info->ramblock_pages *
                                     TARGET_PAGE_SIZE) >> 20;
}

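/*
 * Derive the dirty rate (MB/s) from the accumulated sampling results:
 *
 *   dirty_rate = (total_dirty_samples / total_sample_count)
 *                * total_block_mem_MB * 1000 / msec
 *
 * i.e. the fraction of sampled pages found dirty, scaled to the total
 * sampled memory and normalized from the measured period to one second.
 */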
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Hash one sampled page of TARGET_PAGE_SIZE bytes in the ramblock,
 * located @vfn pages past the ramblock's base host address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

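/*
 * Pick sample_pages_count random virtual frame numbers within the
 * ramblock and record the CRC32 of each sampled page.  Note that
 * g_rand_int_range() excludes its upper bound, so with an end value of
 * ramblock_pages - 1 the block's last page is never sampled.
 */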
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock holds less than one page, or there is nothing to
     * sample, report success so the block is simply skipped.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks of at least MIN_RAMBLOCK_SIZE KiB (the shift
     * below converts KiB to bytes).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

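/*
 * Record baseline hashes for every migratable ramblock worth sampling.
 * Even on failure the partially filled array is handed back through
 * @block_dinfo/@block_count so that the caller can free it.
 */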
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

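/*
 * Look up the recorded info for @block by idstr.  A block whose host
 * address or page count changed while we slept (e.g. the ramblock was
 * resized) is treated as unmatched so that stale hashes are not used.
 */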
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    matched = &infos[i];

    return matched;
}

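/*
 * Re-hash the sampled pages of each recorded ramblock and accumulate
 * the dirty counts.  Returns false if no samples were collected at
 * all, in which case no rate can be derived.
 */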
static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.total_sample_count == 0) {
        return false;
    }

    return true;
}

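/*
 * Core measurement flow: hash the sampled pages under the RCU read
 * lock, drop the lock while sleeping out the sample period, then take
 * it again to re-hash the same pages and compute the rate.
 */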
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_register_thread();
    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
    rcu_unregister_thread();
}

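/*
 * Worker thread entry point: moves the global state from UNSTARTED to
 * MEASURING, runs one measurement, then marks the result MEASURED so
 * that query_dirty_rate_info() will report it.
 */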
void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;
    int64_t start_time;
    int64_t calc_time;
    uint64_t sample_pages;

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    calc_time = config.sample_period_seconds;
    sample_pages = config.sample_pages_per_gigabytes;
    init_dirtyrate_stat(start_time, calc_time, sample_pages);

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }
    return NULL;
}

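/*
 * QMP entry point.  @config must be static: the detached worker thread
 * dereferences it after this function has returned.
 *
 * Illustrative QMP exchange (field values are examples only):
 *
 *   -> { "execute": "calc-dirty-rate",
 *        "arguments": { "calc-time": 1, "sample-pages": 512 } }
 *   <- { "return": {} }
 *   -> { "execute": "query-dirty-rate" }
 *   <- { "return": { "status": "measured", "dirty-rate": 108, ... } }
 */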
void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages,
                         int64_t sample_pages, Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range[%d, %d].",
                         MIN_FETCH_DIRTYRATE_TIME_SEC,
                         MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                            MIN_SAMPLE_PAGE_COUNT,
                            MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = sample_pages;
    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}