/*
 * i386 TCG cpu class initialization
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper-tcg.h"
#include "qemu/accel.h"
#include "hw/core/accel-cpu.h"

#include "tcg-cpu.h"

/* Frob eflags into and out of the CPU temporary format. */

static void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

static void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

static void x86_cpu_synchronize_from_tb(CPUState *cs,
                                        const TranslationBlock *tb)
{
    /* The instruction pointer is always up to date with CF_PCREL. */
    if (!(tb_cflags(tb) & CF_PCREL)) {
        CPUX86State *env = cpu_env(cs);

        if (tb->flags & HF_CS64_MASK) {
            env->eip = tb->pc;
        } else {
            env->eip = (uint32_t)(tb->pc - tb->cs_base);
        }
    }
}

static void x86_restore_state_to_opc(CPUState *cs,
                                     const TranslationBlock *tb,
                                     const uint64_t *data)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int cc_op = data[1];
    uint64_t new_pc;

    if (tb_cflags(tb) & CF_PCREL) {
        /*
         * data[0] in PC-relative TBs is also a linear address, i.e. an address with
         * the CS base added, because it is not guaranteed that EIP bits 12 and higher
         * stay the same across the translation block.  Add the CS base back before
         * replacing the low bits, and subtract it below just like for !CF_PCREL.
         */
        uint64_t pc = env->eip + tb->cs_base;
        new_pc = (pc & TARGET_PAGE_MASK) | data[0];
    } else {
        new_pc = data[0];
    }
    if (tb->flags & HF_CS64_MASK) {
        env->eip = new_pc;
    } else {
        env->eip = (uint32_t)(new_pc - tb->cs_base);
    }

    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}

#ifndef CONFIG_USER_ONLY
static bool x86_debug_check_breakpoint(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* RF disables all architectural breakpoints. */
    return !(env->eflags & RF_MASK);
}
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps x86_tcg_ops = {
    .initialize = tcg_x86_init,
    .synchronize_from_tb = x86_cpu_synchronize_from_tb,
    .restore_state_to_opc = x86_restore_state_to_opc,
    .cpu_exec_enter = x86_cpu_exec_enter,
    .cpu_exec_exit = x86_cpu_exec_exit,
#ifdef CONFIG_USER_ONLY
    .fake_user_interrupt = x86_cpu_do_interrupt,
    .record_sigsegv = x86_cpu_record_sigsegv,
    .record_sigbus = x86_cpu_record_sigbus,
#else
    .tlb_fill = x86_cpu_tlb_fill,
    .do_interrupt = x86_cpu_do_interrupt,
    .cpu_exec_interrupt = x86_cpu_exec_interrupt,
    .do_unaligned_access = x86_cpu_do_unaligned_access,
    .debug_excp_handler = breakpoint_handler,
    .debug_check_breakpoint = x86_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};

static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /* for x86, all cpus use the same set of operations */
    cc->tcg_ops = &x86_tcg_ops;
}

static void x86_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = x86_tcg_cpu_init_ops;
}

static void x86_tcg_cpu_xsave_init(void)
{
#define XO(bit, field) \
    x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);

    XO(XSTATE_FP_BIT, legacy);
    XO(XSTATE_SSE_BIT, legacy);
    XO(XSTATE_YMM_BIT, avx_state);
    XO(XSTATE_BNDREGS_BIT, bndreg_state);
    XO(XSTATE_BNDCSR_BIT, bndcsr_state);
    XO(XSTATE_OPMASK_BIT, opmask_state);
    XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
    XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
    XO(XSTATE_PKRU_BIT, pkru_state);

#undef XO
}

/*
 * TCG-specific defaults that override cpudef models when using TCG.
 * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
 */
static PropValue x86_tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};

static void x86_tcg_cpu_instance_init(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);

    if (xcc->model) {
        /* Special cases not set in the X86CPUDefinition structs: */
        x86_cpu_apply_props(cpu, x86_tcg_default_props);
    }

    x86_tcg_cpu_xsave_init();
}

static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

#ifndef CONFIG_USER_ONLY
    acc->cpu_target_realize = tcg_cpu_realizefn;
#endif /* CONFIG_USER_ONLY */

    acc->cpu_class_init = x86_tcg_cpu_class_init;
    acc->cpu_instance_init = x86_tcg_cpu_instance_init;
}
static const TypeInfo x86_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = x86_tcg_cpu_accel_class_init,
    .abstract = true,
};
static void x86_tcg_cpu_accel_register_types(void)
{
    type_register_static(&x86_tcg_cpu_accel_type_info);
}
type_init(x86_tcg_cpu_accel_register_types);