/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-throttle.h"
#include "migration.h"
#include "migration-stats.h"
#include "trace.h"

/* vcpu throttling controls */
static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
static unsigned int throttle_percentage;
static bool throttle_dirty_sync_timer_active;
static uint64_t throttle_dirty_sync_count_prev;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

/* Make sure the RAMBlock dirty bitmap is synchronized every five seconds */
#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000

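/*
 * Per-vCPU work item: sleep for the portion of the 10 ms timeslice that
 * corresponds to the current throttle percentage, dropping the BQL while
 * waiting.  The sleep time is ratio * timeslice with ratio = pct / (1 - pct);
 * e.g. at 75% the ratio is 0.75 / 0.25 = 3, so the vCPU sleeps roughly
 * 30 ms for every 10 ms of run time.
 */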
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    int64_t sleeptime_ns, endtime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    throttle_ratio = pct / (1 - pct);
    /* Add 1ns to fix double's rounding error (like 0.9999999...) */
    sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
    endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
    while (sleeptime_ns > 0 && !cpu->stop) {
        if (sleeptime_ns > SCALE_MS) {
            qemu_cond_timedwait_bql(cpu->halt_cond,
                                    sleeptime_ns / SCALE_MS);
        } else {
            bql_unlock();
            g_usleep(sleeptime_ns / SCALE_US);
            bql_lock();
        }
        sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }
    qatomic_set(&cpu->throttle_thread_scheduled, 0);
}

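/*
 * Timer callback: schedule cpu_throttle_thread() on every vCPU that does
 * not already have a throttle work item pending, then re-arm the timer so
 * that one run + sleep cycle spans TIMESLICE / (1 - pct).  For example, at
 * 50% the timer fires every 20 ms and each vCPU sleeps about 10 ms of it.
 */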
static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
}

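/*
 * Set the throttle percentage, clamped to [CPU_THROTTLE_PCT_MIN,
 * CPU_THROTTLE_PCT_MAX] (1..99).  The periodic tick is kicked off here only
 * when throttling was previously inactive; afterwards the timer re-arms
 * itself.  E.g. cpu_throttle_set(99) makes the vCPUs sleep for roughly 99%
 * of each cycle.
 */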
void cpu_throttle_set(int new_throttle_pct)
{
    /*
     * Remember whether throttling was already active before
     * throttle_percentage is modified.
     */
    bool throttle_active = cpu_throttle_active();

    trace_cpu_throttle_set(new_throttle_pct);

    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    qatomic_set(&throttle_percentage, new_throttle_pct);

    if (!throttle_active) {
        cpu_throttle_timer_tick(NULL);
    }
}

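/*
 * Disable vCPU throttling: clear the percentage (which keeps the throttle
 * timer from re-arming) and stop the periodic dirty sync timer.
 */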
void cpu_throttle_stop(void)
{
    qatomic_set(&throttle_percentage, 0);
    cpu_throttle_dirty_sync_timer(false);
}

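/* Return true if a non-zero throttle percentage is currently set. */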
bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

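/* Return the current throttle percentage (0 when throttling is off). */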
int cpu_throttle_get_percentage(void)
{
    return qatomic_read(&throttle_percentage);
}

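/*
 * Periodic (5 s) callback: if no dirty bitmap sync has happened since the
 * previous tick, force one under the RCU read lock, so the RAMBlock dirty
 * bitmap is synchronized at least once per timeslice.  The first migration
 * iteration is skipped (it copies all memory anyhow), and the timer always
 * re-arms itself.
 */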
void cpu_throttle_dirty_sync_timer_tick(void *opaque)
{
    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);

    /*
     * The first iteration copies all memory anyhow and has no
     * effect on guest performance, therefore omit it to avoid
     * paying extra for the sync penalty.
     */
    if (sync_cnt <= 1) {
        goto end;
    }

    if (sync_cnt == throttle_dirty_sync_count_prev) {
        trace_cpu_throttle_dirty_sync();
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(false);
        }
    }

end:
    throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);

    timer_mod(throttle_dirty_sync_timer,
        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
}

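/* Return true if the periodic dirty sync timer is currently active. */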
static bool cpu_throttle_dirty_sync_active(void)
{
    return qatomic_read(&throttle_dirty_sync_timer_active);
}

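/*
 * Enable or disable the periodic dirty sync timer.  Enabling is a no-op if
 * the timer is already active; the cached sync count is reset on enable, in
 * case a previous migration was cancelled.
 */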
void cpu_throttle_dirty_sync_timer(bool enable)
{
    assert(throttle_dirty_sync_timer);

    if (enable) {
        if (!cpu_throttle_dirty_sync_active()) {
            /*
             * Always reset the dirty sync count cache, in case migration
             * was cancelled once.
             */
            throttle_dirty_sync_count_prev = 0;
            timer_mod(throttle_dirty_sync_timer,
                qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
                    CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
            qatomic_set(&throttle_dirty_sync_timer_active, 1);
        }
    } else {
        if (cpu_throttle_dirty_sync_active()) {
            timer_del(throttle_dirty_sync_timer);
            qatomic_set(&throttle_dirty_sync_timer_active, 0);
        }
    }
}

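/*
 * Create both timers on QEMU_CLOCK_VIRTUAL_RT.  This should run once at
 * startup, before cpu_throttle_set() or cpu_throttle_dirty_sync_timer()
 * is used (the latter asserts that its timer exists).
 */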
void cpu_throttle_init(void)
{
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
    throttle_dirty_sync_timer =
        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                     cpu_throttle_dirty_sync_timer_tick, NULL);
}