/*
 * QEMU TCG vCPU common functionality
 *
 * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/gdbstub.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"

/* common functionality among all TCG variants */

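/*
 * Compute the fixed cflags for this vCPU.  These bits are folded into the
 * hash used to look up translated blocks, so TBs generated for one cluster,
 * or for parallel (MTTCG) execution, or with icount accounting, are never
 * reused in an incompatible configuration.
 */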
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
    uint32_t cflags;

    /*
     * Include the cluster number in the hash we use to look up TBs.
     * This is important because a TB that is valid for one cluster at
     * a given physical address and set of CPU flags is not necessarily
     * valid for another:
     * the two clusters may have different views of physical memory, or
     * may have different CPU features (eg FPU present or absent).
     */
    cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;

    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
    cpu->tcg_cflags = cflags;
}

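/*
 * Tear-down hook for a vCPU: signal that this vCPU thread is finished so
 * that anyone waiting on it (e.g. during vCPU unplug) can proceed.
 */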
void tcg_cpus_destroy(CPUState *cpu)
{
    cpu_thread_signal_destroyed(cpu);
}

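/*
 * Run the guest on this vCPU until cpu_exec() returns.  cpu_exec_start/end
 * bracket the run so that exclusive sections (start_exclusive/end_exclusive)
 * can wait for executing vCPUs.  With CONFIG_PROFILER the elapsed time is
 * accumulated into the TCG profiler statistics.
 */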
int tcg_cpus_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
    return ret;
}

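/*
 * Default handle_interrupt hook shared by the mttcg and rr variants:
 * record the pending interrupt and make sure the target vCPU notices it.
 */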
/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());

    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target CPU in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
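        /*
         * We are already on the vCPU thread, so there is no one to kick;
         * making icount_decr.u16.high negative means the check at the
         * start of each translated block sees a pending exit and drops
         * back into cpu_exec() to service the interrupt.
         */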
        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
    }
}

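/*
 * TCG executes generated code, so it can always single-step and plant
 * breakpoints; guest debug is supported unconditionally.
 */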
static bool tcg_supports_guest_debug(void)
{
    return true;
}

/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
{
    static const int xlat[] = {
        [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
        [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
        [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };

    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cputype = xlat[gdbtype];

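    /*
     * Targets that report watchpoints before the access completes
     * (gdb_stop_before_watchpoint) need BP_STOP_BEFORE_ACCESS so the
     * stop is delivered where GDB expects it for that architecture.
     */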
    if (cc->gdb_stop_before_watchpoint) {
        cputype |= BP_STOP_BEFORE_ACCESS;
    }
    return cputype;
}

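/*
 * gdbstub hook: insert a breakpoint or watchpoint on every vCPU.  Under
 * TCG, GDB's "software" and "hardware" breakpoints are handled identically
 * via BP_GDB; unknown types are rejected with -ENOSYS.
 */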
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_insert(cpu, addr, len,
                                        xlat_gdb_type(cpu, type), NULL);
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

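/*
 * gdbstub hook: the mirror of tcg_insert_breakpoint, removing the
 * breakpoint or watchpoint from every vCPU.
 */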
static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_remove(cpu, addr, len,
                                        xlat_gdb_type(cpu, type));
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

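/*
 * Drop every GDB-owned breakpoint and watchpoint on this vCPU, e.g. when
 * the debugger detaches.
 */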
static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
    cpu_breakpoint_remove_all(cpu, BP_GDB);
    cpu_watchpoint_remove_all(cpu, BP_GDB);
}

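/*
 * Fill in the AccelOpsClass hooks for TCG: MTTCG gets one thread per vCPU,
 * otherwise all vCPUs share the round-robin thread; when icount is enabled
 * the virtual clock and interrupt handling are routed through the icount
 * variant instead of the defaults above.
 */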
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    if (qemu_tcg_mttcg_enabled()) {
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        ops->handle_interrupt = tcg_handle_interrupt;
    } else {
        ops->create_vcpu_thread = rr_start_vcpu_thread;
        ops->kick_vcpu_thread = rr_kick_vcpu_thread;

        if (icount_enabled()) {
            ops->handle_interrupt = icount_handle_interrupt;
            ops->get_virtual_clock = icount_get;
            ops->get_elapsed_ticks = icount_get;
        } else {
            ops->handle_interrupt = tcg_handle_interrupt;
        }
    }

    ops->supports_guest_debug = tcg_supports_guest_debug;
    ops->insert_breakpoint = tcg_insert_breakpoint;
    ops->remove_breakpoint = tcg_remove_breakpoint;
    ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}

static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->ops_init = tcg_accel_ops_init;
}

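/*
 * QOM boilerplate: register the abstract AccelOps type for "tcg" so the
 * accelerator core can look it up (via ACCEL_OPS_NAME) when the TCG
 * accelerator is selected.
 */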
static const TypeInfo tcg_accel_ops_type = {
    .name = ACCEL_OPS_NAME("tcg"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = tcg_accel_ops_class_init,
    .abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));

static void tcg_accel_ops_register_types(void)
{
    type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);