/*
 * x86 misc helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "tcg/helper-tcg.h"

void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}
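
/*
 * Note on CR8: architecturally, CR8[3:0] aliases the priority class in
 * APIC.TPR[7:4].  When the guest runs with SVM virtual interrupt masking
 * (HF2_VINTR_MASK), CR8 accesses are redirected to the guest's virtual
 * TPR (env->v_tpr) instead of the real APIC state, as in
 * helper_read_crN() above and helper_write_crN() below.
 */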

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the
         * selective intercept for bits other than TS and MP.
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        if ((env->efer & MSR_EFER_LMA) &&
            (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (!(env->efer & MSR_EFER_LMA)) {
            t0 &= 0xffffffffUL;
        }
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (t0 & cr4_reserved_bits(env)) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            /* Only EFER bits backed by a CPUID feature flag are
               writable; everything else is silently preserved. */
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
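    /*
     * Variable-range MTRRs are interleaved base/mask pairs:
     * MTRRphysBase(n) is MSR 0x200 + 2n and MTRRphysMask(n) is
     * MSR 0x201 + 2n, so (ECX - MTRRphysBase(0)) / 2 recovers the
     * range index.  For example, ECX = 0x202 is MTRRphysBase(1)
     * and selects env->mtrr_var[1].
     */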
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
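    /*
     * Anything else in the MSR_MC0_CTL range is a machine-check bank
     * register: each bank occupies four consecutive MSRs (CTL, STATUS,
     * ADDR, MISC), and the low byte of MCG_CAP holds the bank count.
     * A bank's CTL register only accepts all-zeros or all-ones.
     */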
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}

void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
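    /*
     * env->mtrr_fixed[] holds the eleven fixed-range MTRRs in
     * architectural order: entry 0 is MTRRfix64K_00000, entries 1-2
     * the two 16K registers, and entries 3-10 the eight 4K registers,
     * hence the "+ 1" and "+ 3" offsets here and in helper_wrmsr().
     */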
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    /* RDMSR returns the 64-bit value split across EDX:EAX. */
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}

void helper_flush_page(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env_cpu(env), addr);
}

static void QEMU_NORETURN do_hlt(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
    env->eip += next_eip_addend;

    do_hlt(env);
}

void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}

void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        /*
         * More than one vCPU: do not halt, since a wakeup by a store
         * to the monitored address is not modeled; pause instead.
         */
        do_pause(env);
    } else {
        do_hlt(env);
    }
}