xref: /openbmc/qemu/accel/tcg/tcg-all.c (revision 8fa3b702)
/*
 * QEMU System Emulator, accelerator interfaces
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/accel.h"
#include "sysemu/tcg.h"
#include "qom/object.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "tcg/tcg.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/boards.h"
#include "qapi/qapi-builtin-visit.h"

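/*
 * Per-accelerator state for TCG: whether multi-threaded TCG (MTTCG) is
 * in use, and the requested translation block cache size in MiB (it is
 * multiplied by 1024 * 1024 before being handed to tcg_exec_init() in
 * tcg_init() below).
 */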
struct TCGState {
    AccelState parent_obj;

    bool mttcg_enabled;
    unsigned long tb_size;
};
typedef struct TCGState TCGState;

#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg")

DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
                         TYPE_TCG_ACCEL)

/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;
    g_assert(qemu_mutex_iothread_locked());

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target CPU in
     * case it is halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
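        /*
         * Setting the high half of icount_decr to -1 makes the 32-bit
         * value negative, which the check at the start of each TB
         * treats as an exit request, so the vCPU notices the newly
         * raised interrupt promptly.
         */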
        atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
        if (use_icount &&
            !cpu->can_do_io &&
            (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    }
}

/*
 * We default to false if we know other enabled options are currently
 * incompatible with MTTCG. Otherwise, once a guest (target) has been
 * updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * it can set the appropriate CONFIG flags in ${target}-softmmu.mak.
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check:
 *
 * - The guest can't be oversized (e.g. a 64-bit guest on a 32-bit host)
 * - The host must provide a memory order at least as strong as the
 *   guest's
 *
 * It may be possible in the future to support strong guests on weak
 * hosts, but that would require tagging all loads/stores in a guest
 * with their implicit memory order requirements, which would likely
 * slow things down a lot.
 */

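/*
 * The guest's default memory order must be a subset of what the TCG
 * backend (host) provides: any ordering bit the guest relies on but
 * the host does not guarantee makes "guest & ~host" non-zero.
 */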
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}

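/*
 * MTTCG is only enabled by default when icount is off, the guest is
 * not oversized, the target declares TARGET_SUPPORTS_MTTCG and the
 * memory orders are compatible (see the comment above).
 */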
static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

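/*
 * Instance init runs before any user-supplied properties are applied,
 * so the "thread" property below can still override this default.
 */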
static void tcg_accel_instance_init(Object *obj)
{
    TCGState *s = TCG_STATE(obj);

    s->mttcg_enabled = default_mttcg_enabled();
}

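/*
 * Accelerator init hook: size the translation cache (tb_size is in
 * MiB), install the TCG interrupt handler and latch the MTTCG setting
 * chosen by default_mttcg_enabled() or the "thread" property.
 */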
static int tcg_init(MachineState *ms)
{
    TCGState *s = TCG_STATE(current_accel());

    tcg_exec_init(s->tb_size * 1024 * 1024);
    cpu_interrupt_handler = tcg_handle_interrupt;
    mttcg_enabled = s->mttcg_enabled;
    return 0;
}

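/*
 * Accessors for the "thread" property: "multi" enables MTTCG,
 * "single" disables it.
 */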
static char *tcg_get_thread(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    return g_strdup(s->mttcg_enabled ? "multi" : "single");
}

static void tcg_set_thread(Object *obj, const char *value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    if (strcmp(value, "multi") == 0) {
        if (TCG_OVERSIZED_GUEST) {
            error_setg(errp, "No MTTCG when guest word size > host's");
        } else if (use_icount) {
            error_setg(errp, "No MTTCG when icount is enabled");
        } else {
#ifndef TARGET_SUPPORTS_MTTCG
            warn_report("Guest not yet converted to MTTCG - "
                        "you may get unexpected results");
#endif
            if (!check_tcg_memory_orders_compatible()) {
                warn_report("Guest expects a stronger memory ordering "
                            "than the host provides");
                error_printf("This may cause strange/hard to debug errors\n");
            }
            s->mttcg_enabled = true;
        }
    } else if (strcmp(value, "single") == 0) {
        s->mttcg_enabled = false;
    } else {
        error_setg(errp, "Invalid 'thread' setting %s", value);
    }
}

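/*
 * Accessors for the "tb-size" property: the translation block cache
 * size in MiB.
 */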
static void tcg_get_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value = s->tb_size;

    visit_type_uint32(v, name, &value, errp);
}

static void tcg_set_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->tb_size = value;
}

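/*
 * The properties registered below can be set on the command line, for
 * example (illustrative invocation, binary and values are arbitrary):
 *
 *     qemu-system-x86_64 -accel tcg,thread=multi,tb-size=512
 *
 * which requests MTTCG and a 512 MiB translation cache.
 */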
static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "tcg";
    ac->init_machine = tcg_init;
    ac->allowed = &tcg_allowed;

    object_class_property_add_str(oc, "thread",
                                  tcg_get_thread,
                                  tcg_set_thread);

    object_class_property_add(oc, "tb-size", "int",
        tcg_get_tb_size, tcg_set_tb_size,
        NULL, NULL);
    object_class_property_set_description(oc, "tb-size",
        "TCG translation block cache size");
}

static const TypeInfo tcg_accel_type = {
    .name = TYPE_TCG_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = tcg_accel_instance_init,
    .class_init = tcg_accel_class_init,
    .instance_size = sizeof(TCGState),
};

static void register_accel_types(void)
{
    type_register_static(&tcg_accel_type);
}

type_init(register_accel_types);