/*
 * x86 misc helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "tcg/helper-tcg.h"

/* Port I/O helpers: forward IN/OUT accesses to the I/O address space. */

void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}

/*
 * Control register accesses.  CR8 is routed to the APIC task priority
 * register unless SVM virtual interrupt masking (V_INTR) is in effect,
 * in which case the guest sees the shadow v_tpr instead.
 */
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC());
    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC());
    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

/* WRMSR: write EDX:EAX to the MSR selected by ECX. */
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}

/* RDMSR: read the MSR selected by ECX into EDX:EAX. */
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}