/*
 * Copyright 2008 IBM Corporation
 *           2008 Red Hat, Inc.
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains public-domain code from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "gdbstub/enums.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "accel/accel-cpu-ops.h"
#include "system/cpus.h"
#include "system/hvf.h"
#include "system/hvf_int.h"
#include <mach/mach_time.h>

HVFState *hvf_state;

/* Memory slots */

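/*
 * Return the registered memory slot that overlaps the guest-physical
 * range [start, start + size), or NULL if no slot intersects it.
 * Slots whose size is 0 are unused entries and are skipped.
 */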
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    hvf_slot *slot;
    int x;
    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            (start + size) > slot->start) {
            return slot;
        }
    }
    return NULL;
}

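/*
 * vcpu_dirty tracks which side holds the authoritative register state:
 * false means HVF does, true means QEMU does (and the state must be
 * pushed back to HVF before the vCPU runs again).
 */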
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

static void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
                                             run_on_cpu_data arg)
{
    /* QEMU state is the reference, push it to HVF now and on next entry */
    cpu->vcpu_dirty = true;
}

static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

static void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}

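/*
 * SIG_IPI is used only to kick the vCPU thread out of a blocking call,
 * so its handler intentionally does nothing: delivery of the signal is
 * the whole point.
 */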
static void dummy_signal(int sig)
{
}

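/*
 * Runs on the target vCPU's thread via run_on_cpu(): read the vCPU's
 * cumulative guest execution time, in mach_absolute_time() units, into
 * the uint64_t that arg.host_ptr points at.
 */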
static void do_hvf_get_vcpu_exec_time(CPUState *cpu, run_on_cpu_data arg)
{
    int r = hv_vcpu_get_exec_time(cpu->accel->fd, arg.host_ptr);
    assert_hvf_ok(r);
}

static void hvf_vcpu_destroy(CPUState *cpu)
{
    hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
    assert_hvf_ok(ret);

    hvf_arch_vcpu_destroy(cpu);
    g_free(cpu->accel);
    cpu->accel = NULL;
}

static int hvf_init_vcpu(CPUState *cpu)
{
    int r;

    cpu->accel = g_new0(AccelCPUState, 1);

    /* init cpu signals */
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
    sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);

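    /*
     * On Apple silicon, hv_vcpu_create() also hands back a pointer to
     * the kernel-shared exit information structure; the x86 variant
     * instead takes a flags argument.
     */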
#ifdef __aarch64__
    r = hv_vcpu_create(&cpu->accel->fd,
                       (hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
#else
    r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT);
#endif
    assert_hvf_ok(r);
    cpu->vcpu_dirty = true;

    cpu->accel->guest_debug_enabled = false;

    return hvf_arch_init_vcpu(cpu);
}

/*
 * The HVF-specific vCPU thread function. This one should only run when the
 * host CPU supports the VMX "unrestricted guest" feature.
 */
static void *hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    bql_lock();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

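    /*
     * Main loop: run the guest whenever the vCPU is runnable and service
     * pending I/O events in between; exit once the vCPU has been
     * unplugged and can no longer run.
     */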
    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu_thread_signal_destroyed(cpu);
    bql_unlock();
    rcu_unregister_thread();
    return NULL;
}

static void hvf_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /*
     * HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode.
     */
    assert(hvf_enabled());

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
    struct hvf_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int hvf_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
}

static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg)
{
    hvf_arch_update_guest_debug(cpu);
}

int hvf_update_guest_debug(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL);
    return 0;
}

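/*
 * Software breakpoints live in a single VM-wide list and are refcounted
 * via use_count, so inserting the same address twice only patches the
 * guest once. Hardware breakpoints are delegated to the architecture
 * code. Either way, every vCPU re-syncs its debug state afterwards.
 */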
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct hvf_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = hvf_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_new(struct hvf_sw_breakpoint, 1);
        bp->pc = addr;
        bp->use_count = 1;
        err = hvf_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry);
    } else {
        err = hvf_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = hvf_update_guest_debug(cpu);
        if (err) {
            return err;
        }
    }
    return 0;
}

static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
    struct hvf_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = hvf_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = hvf_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = hvf_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = hvf_update_guest_debug(cpu);
        if (err) {
            return err;
        }
    }
    return 0;
}

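/*
 * Drop every software and hardware breakpoint, then refresh the debug
 * state on all vCPUs. Software breakpoint removal is best-effort:
 * restoring the original instruction can fail on the requesting vCPU,
 * in which case each vCPU is tried in turn.
 */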
static void hvf_remove_all_breakpoints(CPUState *cpu)
{
    struct hvf_sw_breakpoint *bp, *next;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) {
        if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    hvf_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        hvf_update_guest_debug(cpu);
    }
}

static void hvf_get_vcpu_stats(CPUState *cpu, GString *buf)
{
    uint64_t time_mach; /* units of mach_absolute_time() */

    run_on_cpu(cpu, do_hvf_get_vcpu_exec_time, RUN_ON_CPU_HOST_PTR(&time_mach));

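    /* Scale mach time units to nanoseconds using the host timebase. */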
    mach_timebase_info_data_t timebase;
    mach_timebase_info(&timebase);
    uint64_t time_ns = time_mach * timebase.numer / timebase.denom;

    g_string_append_printf(buf, "HVF cumulative execution time: %llu.%.3llus\n",
                           time_ns / 1000000000,
                           (time_ns % 1000000000) / 1000000);
}

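/*
 * Hook the HVF implementations into the generic AccelOps table; the
 * common cpus layer invokes these for vCPU thread setup, state
 * synchronization and guest debugging.
 */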
static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = hvf_start_vcpu_thread;
    ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
    ops->handle_interrupt = generic_handle_interrupt;

    ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
    ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
    ops->synchronize_state = hvf_cpu_synchronize_state;
    ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;

    ops->insert_breakpoint = hvf_insert_breakpoint;
    ops->remove_breakpoint = hvf_remove_breakpoint;
    ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
    ops->update_guest_debug = hvf_update_guest_debug;
    ops->supports_guest_debug = hvf_arch_supports_guest_debug;

    ops->get_vcpu_stats = hvf_get_vcpu_stats;
}

static const TypeInfo hvf_accel_ops_type = {
    .name = ACCEL_OPS_NAME("hvf"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = hvf_accel_ops_class_init,
    .abstract = true,
};

static void hvf_accel_ops_register_types(void)
{
    type_register_static(&hvf_accel_ops_type);
}

type_init(hvf_accel_ops_register_types);