xref: /openbmc/qemu/system/cpu-timers.c (revision 9e6190ae)
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/seqlock.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "hw/core/cpu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/cpu-throttle.h"
#include "sysemu/cpu-timers-internal.h"

/* clock and ticks */

static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /*
         * Non-increasing ticks may happen if the host uses software suspend.
         * Bump the offset so that the value we return never goes backwards.
         */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
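
/*
 * Added note (illustrative, not from the original source): a worked example
 * of the clamp above.  Suppose a previous call returned cpu_ticks_prev = 1000
 * and, after a host software suspend, offset + host ticks now only add up to
 * 400.  The clamp raises cpu_ticks_offset by 600 and returns 1000 again, so
 * cpu_get_ticks() never appears to run backwards and later calls resume
 * counting up from 1000.
 */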

/*
 * Return the time elapsed in the VM between vm_start and vm_stop,
 * in units of the host CPU cycle counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}

int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/*
 * Return the monotonic time elapsed in the VM, i.e. the time between
 * vm_start and vm_stop, in nanoseconds.
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
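
/*
 * Added note (illustrative sketch, not from the original source): readers on
 * any thread can sample VM time without taking the spinlock, e.g.
 *
 *     int64_t t0 = cpu_get_clock();
 *     ... let the guest run ...
 *     int64_t guest_ns = cpu_get_clock() - t0;   // hypothetical caller
 *
 * The seqlock retry loop above guarantees a consistent snapshot even if a
 * writer updates cpu_clock_offset concurrently.
 */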

/*
 * enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/*
 * disable cpu_get_ticks(): the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
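
/*
 * Added note (illustrative sketch, not from the original source): the
 * enable/disable pair freezes and resumes VM time around a stop.  A caller
 * in the run-state code would, conceptually, do:
 *
 *     cpu_disable_ticks();   // vm_stop: clock and ticks freeze
 *     ... while paused, cpu_get_clock() keeps returning the same value ...
 *     cpu_enable_ticks();    // vm_start: counting resumes
 *
 * because disabling latches the elapsed time into the offsets and enabling
 * subtracts the current host values from them again.
 */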

static bool icount_state_needed(void *opaque)
{
    return icount_enabled();
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

static bool icount_shift_state_needed(void *opaque)
{
    return icount_enabled() == ICOUNT_ADAPTATIVE;
}

/*
 * The subsection for warp timer migration is optional, because the warp
 * timer may not have been created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
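
/*
 * Added note (not from the original source): for these optional subsections,
 * the .needed callback is evaluated when the state is saved; if it returns
 * false the subsection is simply omitted from the migration stream, and the
 * destination treats the missing data as "not present" rather than as an
 * error.
 */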

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_shift = {
    .name = "timer/icount/shift",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = icount_shift_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT16(icount_time_shift, TimersState),
        VMSTATE_INT64(last_delta, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        &icount_vmstate_shift,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &icount_vmstate_timers,
        NULL
    }
};
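
/*
 * Added note (not from the original source): "timer" still accepts version 1
 * streams (minimum_version_id = 1).  VMSTATE_UNUSED(8) keeps the on-the-wire
 * layout compatible by skipping the 8 bytes an older field occupied, and
 * VMSTATE_INT64_V(cpu_clock_offset, ..., 2) means cpu_clock_offset is only
 * expected in the stream when version_id >= 2.
 */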

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!icount_enabled() || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /*
         * A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /*
         * qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

TimersState timers_state;

/* Initialize the timers state and, for convenience, the cpu throttle. */
void cpu_timers_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);

    cpu_throttle_init();
}
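
/*
 * Added note (illustrative, not from the original source): cpu_timers_init()
 * is expected to run once during early system-emulator startup, before any
 * vCPU thread uses the seqlock/spinlock pair and before migration needs the
 * registered "timer" vmstate.  A hypothetical caller would simply do:
 *
 *     cpu_timers_init();
 *     ...
 *     cpu_enable_ticks();   // at vm_start, once the VM begins running
 */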
278