/*
 * Dirtyrate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qapi/qapi-commands-migration.h"
#include "migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;

/*
 * Sleep until at least @msec milliseconds have passed since @initial_time,
 * then return the time (in milliseconds) that actually elapsed.
 */
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

/* Atomically move @state from @old_state to @new_state; -1 on mismatch. */
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo));

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;
    }

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time)
{
    DirtyStat.total_dirty_samples = 0;
    DirtyStat.total_sample_count = 0;
    DirtyStat.total_block_mem_MB = 0;
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = calc_time;
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.total_sample_count += info->sample_pages_count;
    /* size of this ramblock's pages in MB */
    DirtyStat.total_block_mem_MB += (info->ramblock_pages *
                                     TARGET_PAGE_SIZE) >> 20;
}

/*
 * Extrapolate the sampled dirty fraction to all measured memory:
 * dirty rate (MB/s) = dirty_samples / sample_count * total_mem_MB
 *                     * 1000 / elapsed_msec
 */
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}
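
/*
 * Worked example of the extrapolation above (illustrative numbers only,
 * not from a real run): with 4096 MB of sampled ramblocks and 4096
 * sampled pages, of which 512 hash differently after a 1000 ms period,
 * the estimate is 512 * 4096 * 1000 / (4096 * 1000) = 512 MB/s.
 */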

/*
 * Hash one sampled page (TARGET_PAGE_SIZE bytes) of a ramblock; @vfn is
 * the page's virtual frame number relative to the ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock is smaller than one page or nothing would be
     * sampled, report success so the ramblock is simply skipped.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    /* Pick random page indexes and record their initial hashes. */
    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}
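
/*
 * Sampling density sketch (assumes the default of
 * DIRTYRATE_DEFAULT_SAMPLE_PAGES pages per GB, 512 in dirtyrate.h at the
 * time of writing): a 4 GB ramblock yields ((4 << 30) * 512) >> 30 == 2048
 * sampled pages, i.e. roughly one sampled page per 2 MB of guest memory.
 */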

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE, which is given
     * in KB (hence the shift by 10).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

/* Re-hash the sampled pages and count those whose hash has changed. */
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    matched = &infos[i];

    return matched;
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.total_sample_count == 0) {
        return false;
    }

    return true;
}
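
/*
 * Measurement flow, tying the helpers above together: record a CRC32 for
 * a random sample of pages in each migratable ramblock, sleep for the
 * configured period, re-hash the same pages, and feed every changed hash
 * into update_dirtyrate() as one dirty sample.
 */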
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_register_thread();
    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
    rcu_unregister_thread();
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;
    int64_t start_time;
    int64_t calc_time;

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed");
        return NULL;
    }

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    calc_time = config.sample_period_seconds;
    init_dirtyrate_stat(start_time, calc_time);

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed");
    }
    return NULL;
}

void qmp_calc_dirty_rate(int64_t calc_time, Error **errp)
{
    /*
     * config is static so the detached worker thread can still
     * dereference it safely after this function has returned.
     */
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range [%d, %d]",
                   MIN_FETCH_DIRTYRATE_TIME_SEC,
                   MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}
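
/*
 * Example QMP exchange (illustrative; the returned values are made up):
 *   -> { "execute": "calc-dirty-rate", "arguments": { "calc-time": 1 } }
 *   <- { "return": {} }
 *   ... wait at least calc-time seconds ...
 *   -> { "execute": "query-dirty-rate" }
 *   <- { "return": { "status": "measured", "dirty-rate": 108,
 *                    "start-time": 1602251158, "calc-time": 1 } }
 */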