xref: /openbmc/qemu/system/cpu-timers.c (revision 4f752191)
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/seqlock.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "hw/core/cpu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/cpu-timers-internal.h"

/* clock and ticks */

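/*
 * Note: "ticks" here are based on the host CPU cycle counter
 * (cpu_get_host_ticks()), while the "clock" is host monotonic time in
 * nanoseconds (get_clock()).  Both only advance while
 * timers_state.cpu_ticks_enabled is set.
 */
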
static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non-increasing ticks may happen if the host uses software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

/*
 * return the time elapsed in the VM between vm_start and vm_stop.
 * cpu_get_ticks() uses units of the host CPU cycle counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}

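/*
 * Unlocked variant of cpu_get_clock(): callers are expected to hold
 * vm_clock_lock (or the seqlock write side), or to call this inside a
 * seqlock_read_begin()/seqlock_read_retry() loop as cpu_get_clock() does.
 */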
int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/*
 * Return the monotonic time elapsed in the VM, i.e.,
 * the time between vm_start and vm_stop.
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

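/*
 * Rough usage sketch (illustrative only; the actual call sites live in
 * the run state handling code, not in this file):
 *
 *     cpu_enable_ticks();                  // guest starts running
 *     ...                                  // guest executes for a while
 *     int64_t ran_ns = cpu_get_clock();    // ns elapsed while running
 *     cpu_disable_ticks();                 // guest stops; clocks freeze
 */
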
/*
 * enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/*
 * disable cpu_get_ticks(): the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

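/*
 * "needed" callbacks: each optional migration subsection below is only
 * put on the wire when the corresponding icount state actually exists
 * for this run.
 */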
static bool icount_state_needed(void *opaque)
{
    return icount_enabled();
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

static bool icount_shift_state_needed(void *opaque)
{
    return icount_enabled() == ICOUNT_ADAPTATIVE;
}

/*
 * Subsection for warp timer migration is optional, because the warp timer
 * may not be created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_shift = {
    .name = "timer/icount/shift",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = icount_shift_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT16(icount_time_shift, TimersState),
        VMSTATE_INT64(last_delta, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        &icount_vmstate_shift,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &icount_vmstate_timers,
        NULL
    }
};
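
/*
 * The migration stream thus nests as "timer" -> optional "timer/icount"
 * -> optional warp_timer/timers/shift subsections, each guarded by its
 * .needed callback above.
 */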

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!icount_enabled() || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /*
         * A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /*
         * qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

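/* Global timer state, protected by vm_clock_seqlock / vm_clock_lock. */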
TimersState timers_state;

/* initialize timers state */
void cpu_timers_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}