/* xref: /openbmc/qemu/accel/tcg/tcg-all.c (revision 7f3a3d3d) */
/*
 * QEMU System Emulator, accelerator interfaces
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "exec/replay-core.h"
#include "sysemu/cpu-timers.h"
#include "tcg/tcg.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "internal.h"

struct TCGState {
    AccelState parent_obj;

    bool mttcg_enabled;
    bool one_insn_per_tb;
    int splitwx_enabled;
    unsigned long tb_size;
};
typedef struct TCGState TCGState;

#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg")

DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
                         TYPE_TCG_ACCEL)

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise, once a
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * it can set the appropriate CONFIG flag in ${target}-softmmu.mak.
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check:
 *
 * - The guest can't be oversized (e.g. a 64 bit guest on a 32 bit host)
 * - The host must provide at least as strong a memory order as the guest
 *
 * It may be possible in the future to support strong guests on weak
 * hosts, but that would require tagging all loads/stores in a guest
 * with their implicit memory order requirements, which would likely
 * slow things down a lot.
 */

static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}
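
/*
 * The check passes when every ordering guarantee the guest relies on
 * (TCG_GUEST_DEFAULT_MO) is already provided by the host backend
 * (TCG_TARGET_DEFAULT_MO).  For example, a weakly ordered guest such as
 * Arm (which defines a guest MO of 0) is compatible with any host, since
 * 0 & ~x == 0 for any x, while a TSO guest such as x86 running on a
 * weakly ordered host backend (default MO of 0) leaves bits set, fails
 * the check, and so keeps MTTCG off by default.  (Exact macro values are
 * per the respective tcg headers at this revision.)
 */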

static bool default_mttcg_enabled(void)
{
    if (icount_enabled() || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

static void tcg_accel_instance_init(Object *obj)
{
    TCGState *s = TCG_STATE(obj);

    s->mttcg_enabled = default_mttcg_enabled();

    /* If debugging is enabled, default split-wx to "auto on" (-1), otherwise off (0). */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
    s->splitwx_enabled = -1;
#else
    s->splitwx_enabled = 0;
#endif
}

bool mttcg_enabled;
bool one_insn_per_tb;

static int tcg_init_machine(MachineState *ms)
{
    TCGState *s = TCG_STATE(current_accel());
#ifdef CONFIG_USER_ONLY
    unsigned max_cpus = 1;
#else
    unsigned max_cpus = ms->smp.max_cpus;
#endif

    tcg_allowed = true;
    mttcg_enabled = s->mttcg_enabled;

    page_init();
    tb_htable_init();
    tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);

#if defined(CONFIG_SOFTMMU)
    /*
     * There's no guest base to take into account, so go ahead and
     * initialize the prologue now.
     */
    tcg_prologue_init(tcg_ctx);
#endif

    return 0;
}
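
/*
 * Note that s->tb_size is interpreted in MiB here: the default of 0 (when
 * the user never sets the "tb-size" property) lets tcg_init() fall back to
 * its own default code buffer sizing.  In user-mode emulation there is no
 * machine/SMP configuration to consult, hence the fixed max_cpus of 1.
 */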

static char *tcg_get_thread(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    return g_strdup(s->mttcg_enabled ? "multi" : "single");
}

static void tcg_set_thread(Object *obj, const char *value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    if (strcmp(value, "multi") == 0) {
        if (TCG_OVERSIZED_GUEST) {
            error_setg(errp, "No MTTCG when guest word size > host's");
        } else if (icount_enabled()) {
            error_setg(errp, "No MTTCG when icount is enabled");
        } else {
#ifndef TARGET_SUPPORTS_MTTCG
            warn_report("Guest not yet converted to MTTCG - "
                        "you may get unexpected results");
#endif
            if (!check_tcg_memory_orders_compatible()) {
                warn_report("Guest expects a stronger memory ordering "
                            "than the host provides");
                error_printf("This may cause strange/hard to debug errors\n");
            }
            s->mttcg_enabled = true;
        }
    } else if (strcmp(value, "single") == 0) {
        s->mttcg_enabled = false;
    } else {
        error_setg(errp, "Invalid 'thread' setting %s", value);
    }
}
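
/*
 * The "thread" property is typically set on the command line, e.g.
 * "-accel tcg,thread=multi" or "-accel tcg,thread=single".  The setter
 * only updates the per-accelerator state; tcg_init_machine() copies it
 * into the mttcg_enabled global during startup, so the choice has to be
 * made before the machine is initialized.
 */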

static void tcg_get_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value = s->tb_size;

    visit_type_uint32(v, name, &value, errp);
}

static void tcg_set_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->tb_size = value;
}
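
/*
 * The property is declared as "int" but the value is taken as MiB by
 * tcg_init_machine() above, so for example "-accel tcg,tb-size=512"
 * requests a 512 MiB translation block cache.  Leaving it at 0 lets
 * tcg_init() choose its default cache size.
 */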

static bool tcg_get_splitwx(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->splitwx_enabled;
}

static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->splitwx_enabled = value;
}
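
/*
 * split-wx is effectively tri-state: tcg_accel_instance_init() leaves it
 * at -1 ("auto", only in system-mode debug-TCG builds) unless the user
 * forces it, e.g. with "-accel tcg,split-wx=on" or "=off", which this
 * bool setter narrows to 1 or 0.  The raw int is passed to tcg_init(),
 * which is left to resolve the "auto" case itself.
 */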

static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->one_insn_per_tb;
}

static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->one_insn_per_tb = value;
    /* Set the global also: this changes the behaviour */
    qatomic_set(&one_insn_per_tb, value);
}
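
/*
 * Typically enabled with "-accel tcg,one-insn-per-tb=on".  Because the
 * setter also updates the global via qatomic_set(), a change made after
 * startup should be picked up by subsequently generated translation
 * blocks, which then contain a single guest instruction each (handy when
 * debugging guest code).
 */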

static int tcg_gdbstub_supported_sstep_flags(void)
{
    /*
     * In replay mode all events come from the log and can't be
     * suppressed, otherwise we would break determinism. However, as
     * those events are tied to the number of executed instructions,
     * we won't see them occur every time we single step.
     */
    if (replay_mode != REPLAY_MODE_NONE) {
        return SSTEP_ENABLE;
    } else {
        return SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER;
    }
}
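
/*
 * SSTEP_NOIRQ and SSTEP_NOTIMER indicate that interrupt delivery and timer
 * expiry can be held off while single stepping; outside record/replay TCG
 * can honour both.  In replay mode those events are driven by the log, so
 * only plain SSTEP_ENABLE is advertised, as explained above.
 */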

static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "tcg";
    ac->init_machine = tcg_init_machine;
    ac->allowed = &tcg_allowed;
    ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;

    object_class_property_add_str(oc, "thread",
                                  tcg_get_thread,
                                  tcg_set_thread);

    object_class_property_add(oc, "tb-size", "int",
        tcg_get_tb_size, tcg_set_tb_size,
        NULL, NULL);
    object_class_property_set_description(oc, "tb-size",
        "TCG translation block cache size");

    object_class_property_add_bool(oc, "split-wx",
        tcg_get_splitwx, tcg_set_splitwx);
    object_class_property_set_description(oc, "split-wx",
        "Map jit pages into separate RW and RX regions");

    object_class_property_add_bool(oc, "one-insn-per-tb",
                                   tcg_get_one_insn_per_tb,
                                   tcg_set_one_insn_per_tb);
    object_class_property_set_description(oc, "one-insn-per-tb",
        "Only put one guest insn in each translation block");
}

static const TypeInfo tcg_accel_type = {
    .name = TYPE_TCG_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = tcg_accel_instance_init,
    .class_init = tcg_accel_class_init,
    .instance_size = sizeof(TCGState),
};
module_obj(TYPE_TCG_ACCEL);

static void register_accel_types(void)
{
    type_register_static(&tcg_accel_type);
}

type_init(register_accel_types);
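
/*
 * Putting it together: the accelerator and the properties registered in
 * tcg_accel_class_init() are typically selected together on the command
 * line, for instance:
 *
 *     qemu-system-x86_64 -accel tcg,thread=multi,tb-size=256,split-wx=on
 *
 * The values above are only illustrative; the actual defaults chosen by
 * tcg_accel_instance_init() depend on the build options and on the
 * guest/host combination as described near default_mttcg_enabled().
 */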