/*
 * QEMU TCG vCPU common functionality
 *
 * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "exec/gdbstub.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"

/* common functionality among all TCG variants */

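/* Initialize the persistent part of @cpu's TB compile flags (tcg_cflags). */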
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
    uint32_t cflags;

    /*
     * Include the cluster number in the hash we use to look up TBs.
     * This is important because a TB that is valid for one cluster at
     * a given physical address and set of CPU flags is not necessarily
     * valid for another: the two clusters may have different views of
     * physical memory, or may have different CPU features (e.g. FPU
     * present or absent).
     */
    cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;

    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
    cpu->tcg_cflags |= cflags;
}

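/* Signal that this vCPU's thread has been destroyed. */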
void tcg_cpu_destroy(CPUState *cpu)
{
    cpu_thread_signal_destroyed(cpu);
}

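/*
 * Run guest code on @cpu until cpu_exec() returns, bracketing the run with
 * cpu_exec_start()/cpu_exec_end() so that exclusive work on other vCPUs is
 * correctly serialized against this one.  Returns the cpu_exec() exit reason.
 */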
int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
    assert(tcg_enabled());
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
    return ret;
}

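/* On reset, flush this vCPU's TB jump cache and its TLB. */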
static void tcg_cpu_reset_hold(CPUState *cpu)
{
    tcg_flush_jmp_cache(cpu);

    tlb_flush(cpu);
}

/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    g_assert(bql_locked());

    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
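        /*
         * Called from the vCPU's own thread: ask cpu_exec() to leave the
         * execution loop at the next TB boundary so the pending interrupt
         * is noticed.
         */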
        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
    }
}

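/* Guest debug (breakpoints and watchpoints) is always available under TCG. */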
static bool tcg_supports_guest_debug(void)
{
    return true;
}

/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
{
    static const int xlat[] = {
        [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
        [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
        [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };

    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cputype = xlat[gdbtype];

    if (cc->gdb_stop_before_watchpoint) {
        cputype |= BP_STOP_BEFORE_ACCESS;
    }
    return cputype;
}

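/*
 * Insert a GDB breakpoint or watchpoint of the given @type on every vCPU,
 * bailing out on the first failure.  Returns 0 on success, a negative errno
 * on failure, or -ENOSYS for unknown types.
 */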
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_insert(cpu, addr, len,
                                        xlat_gdb_type(cpu, type), NULL);
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

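/*
 * Remove a GDB breakpoint or watchpoint of the given @type from every vCPU,
 * bailing out on the first failure.
 */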
static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_remove(cpu, addr, len,
                                        xlat_gdb_type(cpu, type));
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

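/* Drop every GDB-owned breakpoint and watchpoint on @cpu. */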
static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
    cpu_breakpoint_remove_all(cpu, BP_GDB);
    cpu_watchpoint_remove_all(cpu, BP_GDB);
}

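/*
 * Populate the AccelOpsClass hooks for the selected TCG flavour:
 * multi-threaded TCG (mttcg), round-robin (rr), or rr with icount.
 */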
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    if (qemu_tcg_mttcg_enabled()) {
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        ops->handle_interrupt = tcg_handle_interrupt;
    } else {
        ops->create_vcpu_thread = rr_start_vcpu_thread;
        ops->kick_vcpu_thread = rr_kick_vcpu_thread;

        if (icount_enabled()) {
            ops->handle_interrupt = icount_handle_interrupt;
            ops->get_virtual_clock = icount_get;
            ops->get_elapsed_ticks = icount_get;
        } else {
            ops->handle_interrupt = tcg_handle_interrupt;
        }
    }

    ops->cpu_reset_hold = tcg_cpu_reset_hold;
    ops->supports_guest_debug = tcg_supports_guest_debug;
    ops->insert_breakpoint = tcg_insert_breakpoint;
    ops->remove_breakpoint = tcg_remove_breakpoint;
    ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}

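/* QOM class init: register tcg_accel_ops_init as the ops_init hook. */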
static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->ops_init = tcg_accel_ops_init;
}

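/* QOM type registration for the TCG AccelOps class. */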
static const TypeInfo tcg_accel_ops_type = {
    .name = ACCEL_OPS_NAME("tcg"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = tcg_accel_ops_class_init,
    .abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));

static void tcg_accel_ops_register_types(void)
{
    type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);