/*
 * x86 misc helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "tcg/helper-tcg.h"

void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

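/*
 * Control register writes that the translator cannot handle inline end up
 * here.  CR0/CR3/CR4 go through the cpu_x86_update_cr* helpers so that
 * hflags and the TLB stay consistent; CR8 is an alias of the APIC TPR
 * unless SVM virtual interrupt masking (V_INTR) is active.
 */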
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the
         * selective intercept for bits other than TS and MP.
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
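    /*
     * Variable-range MTRRs are base/mask pairs at consecutive MSR
     * indices, so (ECX - MSR_MTRRphysBase(0)) / 2 selects the pair
     * and the low bit of the index selects base vs. mask.
     */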
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        /* The low byte of MCG_CAP is the number of MCE banks, 4 MSRs each. */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}

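/*
 * RDMSR: read the MSR selected by ECX into EDX:EAX, mirroring the
 * EDX:EAX encoding that helper_wrmsr consumes above.
 */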
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(x86_cpu->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    default:
        /* The low byte of MCG_CAP is the number of MCE banks, 4 MSRs each. */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}

void helper_flush_page(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env_cpu(env), addr);
}

static void QEMU_NORETURN do_hlt(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
    env->eip += next_eip_addend;

    do_hlt(env);
}

void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}

void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        do_pause(env);
    } else {
        do_hlt(env);
    }
}