xref: /openbmc/qemu/target/i386/tcg/tcg-cpu.c (revision ec1d32af)
/*
 * i386 TCG cpu class initialization
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "helper-tcg.h"
23 #include "qemu/accel.h"
24 #include "hw/core/accel-cpu.h"
25 
26 #include "tcg-cpu.h"
27 
/* Frob eflags into and out of the CPU temporary format.  */
29 
/*
 * On entry to the TCG execution loop, split env->eflags into the lazy
 * flag-computation fields: the arithmetic flags move into CC_SRC (with
 * CC_OP_EFLAGS), DF becomes the +1/-1 string-op increment in env->df,
 * and those bits are cleared from env->eflags.
 */
static void x86_cpu_exec_enter(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    /* Full-width mask so the complement below keeps the upper eflags bits. */
    const target_ulong arith_flags = CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C;

    CC_SRC = env->eflags & arith_flags;
    /* DF set => strings walk downward (-1), clear => upward (+1). */
    env->df = (env->eflags & DF_MASK) ? -1 : 1;
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(arith_flags | DF_MASK);
}
40 
/*
 * On exit from the TCG execution loop, fold the lazily-computed condition
 * codes and DF back into a complete env->eflags value.
 */
static void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eflags = cpu_compute_eflags(&cpu->env);
}
48 
/*
 * Resynchronize EIP from the translation block we are entering.
 * With CF_PCREL the instruction pointer is kept up to date by the
 * generated code itself, so there is nothing to do.
 */
static void x86_cpu_synchronize_from_tb(CPUState *cs,
                                        const TranslationBlock *tb)
{
    CPUX86State *env;

    if (tb_cflags(tb) & CF_PCREL) {
        return;
    }

    env = cpu_env(cs);
    if (tb->flags & HF_CS64_MASK) {
        /* 64-bit code segment: tb->pc is the linear == effective address. */
        env->eip = tb->pc;
    } else {
        /* Otherwise strip the CS base and truncate to 32 bits. */
        env->eip = (uint32_t)(tb->pc - tb->cs_base);
    }
}
63 
/*
 * Restore EIP and cc_op from the per-insn unwind data recorded at
 * translation time: data[0] is the (possibly partial) PC, data[1] the
 * CC_OP value in force at that instruction.
 */
static void x86_restore_state_to_opc(CPUState *cs,
                                     const TranslationBlock *tb,
                                     const uint64_t *data)
{
    CPUX86State *env = cpu_env(cs);
    int restored_cc_op = data[1];
    uint64_t eip = data[0];

    if (tb_cflags(tb) & CF_PCREL) {
        /*
         * In PC-relative TBs data[0] holds only the in-page bits of a
         * linear address (CS base included), because EIP bits 12 and up
         * are not guaranteed to stay constant across the block.  Recover
         * the page-high bits from the current linear PC here, and strip
         * the CS base again below just like the !CF_PCREL case.
         */
        eip |= (env->eip + tb->cs_base) & TARGET_PAGE_MASK;
    }
    if (!(tb->flags & HF_CS64_MASK)) {
        eip = (uint32_t)(eip - tb->cs_base);
    }
    env->eip = eip;

    if (restored_cc_op != CC_OP_DYNAMIC) {
        env->cc_op = restored_cc_op;
    }
}
95 
96 #ifndef CONFIG_USER_ONLY
x86_debug_check_breakpoint(CPUState * cs)97 static bool x86_debug_check_breakpoint(CPUState *cs)
98 {
99     X86CPU *cpu = X86_CPU(cs);
100     CPUX86State *env = &cpu->env;
101 
102     /* RF disables all architectural breakpoints. */
103     return !(env->eflags & RF_MASK);
104 }
105 #endif
106 
107 #include "hw/core/tcg-cpu-ops.h"
108 
/* TCG accelerator hooks shared by every x86 CPU model. */
static const TCGCPUOps x86_tcg_ops = {
    .initialize = tcg_x86_init,
    .synchronize_from_tb = x86_cpu_synchronize_from_tb,
    .restore_state_to_opc = x86_restore_state_to_opc,
    .cpu_exec_enter = x86_cpu_exec_enter,
    .cpu_exec_exit = x86_cpu_exec_exit,
#ifdef CONFIG_USER_ONLY
    /* user-mode emulation: faults are delivered as host signals */
    .fake_user_interrupt = x86_cpu_do_interrupt,
    .record_sigsegv = x86_cpu_record_sigsegv,
    .record_sigbus = x86_cpu_record_sigbus,
#else
    /* system emulation: full MMU, interrupt and debug handling */
    .tlb_fill = x86_cpu_tlb_fill,
    .do_interrupt = x86_cpu_do_interrupt,
    .cpu_exec_halt = x86_cpu_exec_halt,
    .cpu_exec_interrupt = x86_cpu_exec_interrupt,
    .do_unaligned_access = x86_cpu_do_unaligned_access,
    .debug_excp_handler = breakpoint_handler,
    .debug_check_breakpoint = x86_debug_check_breakpoint,
    .need_replay_interrupt = x86_need_replay_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
130 
/* AccelCPUClass::init_accel_cpu hook: install the TCG op table. */
static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /* for x86, all cpus use the same set of operations */
    cc->tcg_ops = &x86_tcg_ops;
}
136 
/* Per-CPUClass TCG initialization: defer op installation to init_accel_cpu. */
static void x86_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = x86_tcg_cpu_init_ops;
}
141 
x86_tcg_cpu_xsave_init(void)142 static void x86_tcg_cpu_xsave_init(void)
143 {
144 #define XO(bit, field) \
145     x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
146 
147     XO(XSTATE_FP_BIT, legacy);
148     XO(XSTATE_SSE_BIT, legacy);
149     XO(XSTATE_YMM_BIT, avx_state);
150     XO(XSTATE_BNDREGS_BIT, bndreg_state);
151     XO(XSTATE_BNDCSR_BIT, bndcsr_state);
152     XO(XSTATE_OPMASK_BIT, opmask_state);
153     XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
154     XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
155     XO(XSTATE_PKRU_BIT, pkru_state);
156 
157 #undef XO
158 }
159 
/*
 * TCG-specific defaults that override cpudef models when using TCG.
 * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
 */
static PropValue x86_tcg_default_props[] = {
    /* NOTE(review): presumably disabled because TCG does not emulate VME —
     * confirm against the translator before relying on this. */
    { "vme", "off" },
    { NULL, NULL },
};
168 
/*
 * Per-instance TCG setup: apply TCG property overrides to cpudef-based
 * models and (re)initialize the XSAVE component offsets.
 */
static void x86_tcg_cpu_instance_init(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);

    /* Only models built from X86CPUDefinition structs have xcc->model set. */
    if (xcc->model) {
        x86_cpu_apply_props(cpu, x86_tcg_default_props);
    }

    x86_tcg_cpu_xsave_init();
}
181 
/* Class init for the "tcg" accel-cpu type: wire up the x86 TCG hooks. */
static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = x86_tcg_cpu_class_init;
    acc->cpu_instance_init = x86_tcg_cpu_instance_init;
#ifndef CONFIG_USER_ONLY
    /* Realize-time checks only apply to system emulation. */
    acc->cpu_target_realize = tcg_cpu_realizefn;
#endif /* !CONFIG_USER_ONLY */
}
/* Abstract QOM type registered as the TCG accel-cpu interface for x86. */
static const TypeInfo x86_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = x86_tcg_cpu_accel_class_init,
    .abstract = true,
};
/* Register the type with QOM; run automatically via type_init below. */
static void x86_tcg_cpu_accel_register_types(void)
{
    type_register_static(&x86_tcg_cpu_accel_type_info);
}
type_init(x86_tcg_cpu_accel_register_types);
205