xref: /openbmc/qemu/accel/hvf/hvf-accel-ops.c (revision 5a28fa5ba17254d0398a854657b47af3096bd86a)
1 /*
2  * Copyright 2008 IBM Corporation
3  *           2008 Red Hat, Inc.
4  * Copyright 2011 Intel Corporation
5  * Copyright 2016 Veertu, Inc.
6  * Copyright 2017 The Android Open Source Project
7  *
8  * QEMU Hypervisor.framework support
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of version 2 of the GNU General Public
12  * License as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, see <http://www.gnu.org/licenses/>.
21  *
22  * This file contain code under public domain from the hvdos project:
23  * https://github.com/mist64/hvdos
24  *
25  * Parts Copyright (c) 2011 NetApp, Inc.
26  * All rights reserved.
27  *
28  * Redistribution and use in source and binary forms, with or without
29  * modification, are permitted provided that the following conditions
30  * are met:
31  * 1. Redistributions of source code must retain the above copyright
32  *    notice, this list of conditions and the following disclaimer.
33  * 2. Redistributions in binary form must reproduce the above copyright
34  *    notice, this list of conditions and the following disclaimer in the
35  *    documentation and/or other materials provided with the distribution.
36  *
37  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
38  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
41  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
46  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
47  * SUCH DAMAGE.
48  */
49 
50 #include "qemu/osdep.h"
51 #include "qemu/guest-random.h"
52 #include "qemu/main-loop.h"
53 #include "qemu/queue.h"
54 #include "gdbstub/enums.h"
55 #include "exec/cpu-common.h"
56 #include "hw/core/cpu.h"
57 #include "system/accel-ops.h"
58 #include "system/cpus.h"
59 #include "system/hvf.h"
60 #include "system/hvf_int.h"
61 
/* Global HVF accelerator state; set up once when the accelerator starts. */
HVFState *hvf_state;
63 
64 /* Memory slots */
65 
66 hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
67 {
68     hvf_slot *slot;
69     int x;
70     for (x = 0; x < hvf_state->num_slots; ++x) {
71         slot = &hvf_state->slots[x];
72         if (slot->size && start < (slot->start + slot->size) &&
73             (start + size) > slot->start) {
74             return slot;
75         }
76     }
77     return NULL;
78 }
79 
80 static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
81 {
82     if (!cpu->vcpu_dirty) {
83         hvf_get_registers(cpu);
84         cpu->vcpu_dirty = true;
85     }
86 }
87 
88 static void hvf_cpu_synchronize_state(CPUState *cpu)
89 {
90     if (!cpu->vcpu_dirty) {
91         run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
92     }
93 }
94 
95 static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
96                                              run_on_cpu_data arg)
97 {
98     /* QEMU state is the reference, push it to HVF now and on next entry */
99     cpu->vcpu_dirty = true;
100 }
101 
102 static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
103 {
104     run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
105 }
106 
107 static void hvf_cpu_synchronize_post_init(CPUState *cpu)
108 {
109     run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
110 }
111 
112 static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
113 {
114     run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
115 }
116 
/*
 * No-op handler for SIG_IPI: the signal's only job is to interrupt
 * hv_vcpu_run() and kick the vCPU out of guest mode.
 */
static void dummy_signal(int sig)
{
}
120 
121 static void hvf_vcpu_destroy(CPUState *cpu)
122 {
123     hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
124     assert_hvf_ok(ret);
125 
126     hvf_arch_vcpu_destroy(cpu);
127     g_free(cpu->accel);
128     cpu->accel = NULL;
129 }
130 
131 static int hvf_init_vcpu(CPUState *cpu)
132 {
133     int r;
134 
135     cpu->accel = g_new0(AccelCPUState, 1);
136 
137     /* init cpu signals */
138     struct sigaction sigact;
139 
140     memset(&sigact, 0, sizeof(sigact));
141     sigact.sa_handler = dummy_signal;
142     sigaction(SIG_IPI, &sigact, NULL);
143 
144     pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
145     sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
146 
147 #ifdef __aarch64__
148     r = hv_vcpu_create(&cpu->accel->fd,
149                        (hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
150 #else
151     r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT);
152 #endif
153     assert_hvf_ok(r);
154     cpu->vcpu_dirty = true;
155 
156     cpu->accel->guest_debug_enabled = false;
157 
158     return hvf_arch_init_vcpu(cpu);
159 }
160 
161 /*
162  * The HVF-specific vCPU thread function. This one should only run when the host
163  * CPU supports the VMX "unrestricted guest" feature.
164  */
165 static void *hvf_cpu_thread_fn(void *arg)
166 {
167     CPUState *cpu = arg;
168 
169     int r;
170 
171     assert(hvf_enabled());
172 
173     rcu_register_thread();
174 
175     bql_lock();
176     qemu_thread_get_self(cpu->thread);
177 
178     cpu->thread_id = qemu_get_thread_id();
179     current_cpu = cpu;
180 
181     hvf_init_vcpu(cpu);
182 
183     /* signal CPU creation */
184     cpu_thread_signal_created(cpu);
185     qemu_guest_random_seed_thread_part2(cpu->random_seed);
186 
187     do {
188         if (cpu_can_run(cpu)) {
189             r = hvf_vcpu_exec(cpu);
190             if (r == EXCP_DEBUG) {
191                 cpu_handle_guest_debug(cpu);
192             }
193         }
194         qemu_wait_io_event(cpu);
195     } while (!cpu->unplug || cpu_can_run(cpu));
196 
197     hvf_vcpu_destroy(cpu);
198     cpu_thread_signal_destroyed(cpu);
199     bql_unlock();
200     rcu_unregister_thread();
201     return NULL;
202 }
203 
204 static void hvf_start_vcpu_thread(CPUState *cpu)
205 {
206     char thread_name[VCPU_THREAD_NAME_SIZE];
207 
208     /*
209      * HVF currently does not support TCG, and only runs in
210      * unrestricted-guest mode.
211      */
212     assert(hvf_enabled());
213 
214     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
215              cpu->cpu_index);
216     qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
217                        cpu, QEMU_THREAD_JOINABLE);
218 }
219 
220 struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
221 {
222     struct hvf_sw_breakpoint *bp;
223 
224     QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
225         if (bp->pc == pc) {
226             return bp;
227         }
228     }
229     return NULL;
230 }
231 
232 int hvf_sw_breakpoints_active(CPUState *cpu)
233 {
234     return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
235 }
236 
237 static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg)
238 {
239     hvf_arch_update_guest_debug(cpu);
240 }
241 
242 int hvf_update_guest_debug(CPUState *cpu)
243 {
244     run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL);
245     return 0;
246 }
247 
248 static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
249 {
250     struct hvf_sw_breakpoint *bp;
251     int err;
252 
253     if (type == GDB_BREAKPOINT_SW) {
254         bp = hvf_find_sw_breakpoint(cpu, addr);
255         if (bp) {
256             bp->use_count++;
257             return 0;
258         }
259 
260         bp = g_new(struct hvf_sw_breakpoint, 1);
261         bp->pc = addr;
262         bp->use_count = 1;
263         err = hvf_arch_insert_sw_breakpoint(cpu, bp);
264         if (err) {
265             g_free(bp);
266             return err;
267         }
268 
269         QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry);
270     } else {
271         err = hvf_arch_insert_hw_breakpoint(addr, len, type);
272         if (err) {
273             return err;
274         }
275     }
276 
277     CPU_FOREACH(cpu) {
278         err = hvf_update_guest_debug(cpu);
279         if (err) {
280             return err;
281         }
282     }
283     return 0;
284 }
285 
286 static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
287 {
288     struct hvf_sw_breakpoint *bp;
289     int err;
290 
291     if (type == GDB_BREAKPOINT_SW) {
292         bp = hvf_find_sw_breakpoint(cpu, addr);
293         if (!bp) {
294             return -ENOENT;
295         }
296 
297         if (bp->use_count > 1) {
298             bp->use_count--;
299             return 0;
300         }
301 
302         err = hvf_arch_remove_sw_breakpoint(cpu, bp);
303         if (err) {
304             return err;
305         }
306 
307         QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
308         g_free(bp);
309     } else {
310         err = hvf_arch_remove_hw_breakpoint(addr, len, type);
311         if (err) {
312             return err;
313         }
314     }
315 
316     CPU_FOREACH(cpu) {
317         err = hvf_update_guest_debug(cpu);
318         if (err) {
319             return err;
320         }
321     }
322     return 0;
323 }
324 
325 static void hvf_remove_all_breakpoints(CPUState *cpu)
326 {
327     struct hvf_sw_breakpoint *bp, *next;
328     CPUState *tmpcpu;
329 
330     QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) {
331         if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) {
332             /* Try harder to find a CPU that currently sees the breakpoint. */
333             CPU_FOREACH(tmpcpu)
334             {
335                 if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
336                     break;
337                 }
338             }
339         }
340         QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
341         g_free(bp);
342     }
343     hvf_arch_remove_all_hw_breakpoints();
344 
345     CPU_FOREACH(cpu) {
346         hvf_update_guest_debug(cpu);
347     }
348 }
349 
350 static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
351 {
352     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
353 
354     ops->create_vcpu_thread = hvf_start_vcpu_thread;
355     ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
356     ops->handle_interrupt = generic_handle_interrupt;
357 
358     ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
359     ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
360     ops->synchronize_state = hvf_cpu_synchronize_state;
361     ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
362 
363     ops->insert_breakpoint = hvf_insert_breakpoint;
364     ops->remove_breakpoint = hvf_remove_breakpoint;
365     ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
366     ops->update_guest_debug = hvf_update_guest_debug;
367     ops->supports_guest_debug = hvf_arch_supports_guest_debug;
368 };
/*
 * QOM type descriptor for the HVF accel ops class; registered below so
 * the accelerator core can look it up via ACCEL_OPS_NAME("hvf").
 */
static const TypeInfo hvf_accel_ops_type = {
    .name = ACCEL_OPS_NAME("hvf"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = hvf_accel_ops_class_init,
    .abstract = true,
};
376 
/* Register the HVF accel-ops QOM type at module-init time. */
static void hvf_accel_ops_register_types(void)
{
    type_register_static(&hvf_accel_ops_type);
}

/* Runs during QEMU's type-registration phase, before machine creation. */
type_init(hvf_accel_ops_register_types);
383