xref: /openbmc/qemu/migration/cpu-throttle.c (revision 52ac968a)
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-throttle.h"
#include "migration.h"
#include "migration-stats.h"
#include "trace.h"

/* vcpu throttling controls */
static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
static unsigned int throttle_percentage;
static bool throttle_dirty_sync_timer_active;
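/*
 * Dirty sync count observed at the previous dirty-sync timer tick; used to
 * detect whether the migration thread synced the dirty bitmap in between.
 */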
static uint64_t throttle_dirty_sync_count_prev;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
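/* One throttle timeslice: 10 ms of vCPU run time per throttle period */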
#define CPU_THROTTLE_TIMESLICE_NS 10000000

/* Ensure the RAMBlock dirty bitmap is synced at least every five seconds */
#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000

static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    int64_t sleeptime_ns, endtime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

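    /*
     * With throttle fraction pct, sleep for pct / (1 - pct) of one
     * timeslice so that sleep time / (sleep + run) time equals the
     * requested throttle percentage.
     */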
    pct = (double)cpu_throttle_get_percentage() / 100;
    throttle_ratio = pct / (1 - pct);
    /* Add 1ns to fix double's rounding error (like 0.9999999...) */
    sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
    endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
    while (sleeptime_ns > 0 && !cpu->stop) {
        if (sleeptime_ns > SCALE_MS) {
            qemu_cond_timedwait_bql(cpu->halt_cond,
                                    sleeptime_ns / SCALE_MS);
        } else {
            bql_unlock();
            g_usleep(sleeptime_ns / SCALE_US);
            bql_lock();
        }
        sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }
    qatomic_set(&cpu->throttle_thread_scheduled, 0);
}

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

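    /*
     * Re-arm after one full run-plus-sleep period:
     * CPU_THROTTLE_TIMESLICE_NS / (1 - pct) covers the 10 ms run slice
     * plus the sleep time injected by cpu_throttle_thread().
     */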
    pct = (double)cpu_throttle_get_percentage() / 100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
}

void cpu_throttle_set(int new_throttle_pct)
{
    /*
     * Remember whether the throttle was already active before
     * throttle_percentage is modified.
     */
    bool throttle_active = cpu_throttle_active();

    trace_cpu_throttle_set(new_throttle_pct);

    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    qatomic_set(&throttle_percentage, new_throttle_pct);

    if (!throttle_active) {
        cpu_throttle_timer_tick(NULL);
    }
}

void cpu_throttle_stop(void)
{
    qatomic_set(&throttle_percentage, 0);
    cpu_throttle_dirty_sync_timer(false);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return qatomic_read(&throttle_percentage);
}

void cpu_throttle_dirty_sync_timer_tick(void *opaque)
{
    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);

    /*
     * The first iteration copies all memory anyhow and has no
     * effect on guest performance, therefore omit it to avoid
     * paying extra for the sync penalty.
     */
    if (sync_cnt <= 1) {
        goto end;
    }

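    /*
     * If the dirty sync count has not advanced since the previous tick,
     * the migration thread has not synced the dirty bitmap within this
     * timeslice, so force a sync here.
     */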
    if (sync_cnt == throttle_dirty_sync_count_prev) {
        trace_cpu_throttle_dirty_sync();
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(false);
        }
    }

end:
    throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);

    timer_mod(throttle_dirty_sync_timer,
        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
}

static bool cpu_throttle_dirty_sync_active(void)
{
    return qatomic_read(&throttle_dirty_sync_timer_active);
}

void cpu_throttle_dirty_sync_timer(bool enable)
{
    assert(throttle_dirty_sync_timer);

    if (enable) {
        if (!cpu_throttle_dirty_sync_active()) {
            /*
             * Always reset the dirty sync count cache, in case a previous
             * migration attempt was cancelled.
             */
            throttle_dirty_sync_count_prev = 0;
            timer_mod(throttle_dirty_sync_timer,
                qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
                    CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
            qatomic_set(&throttle_dirty_sync_timer_active, 1);
        }
    } else {
        if (cpu_throttle_dirty_sync_active()) {
            timer_del(throttle_dirty_sync_timer);
            qatomic_set(&throttle_dirty_sync_timer_active, 0);
        }
    }
}

void cpu_throttle_init(void)
{
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
    throttle_dirty_sync_timer =
        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                     cpu_throttle_dirty_sync_timer_tick, NULL);
}