1 /* 2 * i386 helpers (without register variable usage) 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "exec/exec-all.h" 23 #include "sysemu/kvm.h" 24 #include "kvm_i386.h" 25 #ifndef CONFIG_USER_ONLY 26 #include "sysemu/sysemu.h" 27 #include "sysemu/hw_accel.h" 28 #include "monitor/monitor.h" 29 #include "hw/i386/apic_internal.h" 30 #endif 31 32 static void cpu_x86_version(CPUX86State *env, int *family, int *model) 33 { 34 int cpuver = env->cpuid_version; 35 36 if (family == NULL || model == NULL) { 37 return; 38 } 39 40 *family = (cpuver >> 8) & 0x0f; 41 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f); 42 } 43 44 /* Broadcast MCA signal for processor version 06H_EH and above */ 45 int cpu_x86_support_mca_broadcast(CPUX86State *env) 46 { 47 int family = 0; 48 int model = 0; 49 50 cpu_x86_version(env, &family, &model); 51 if ((family == 6 && model >= 14) || family > 6) { 52 return 1; 53 } 54 55 return 0; 56 } 57 58 /***********************************************************/ 59 /* x86 debug */ 60 61 static const char *cc_op_str[CC_OP_NB] = { 62 "DYNAMIC", 63 "EFLAGS", 64 65 "MULB", 66 "MULW", 67 "MULL", 68 "MULQ", 69 70 "ADDB", 71 "ADDW", 72 "ADDL", 73 "ADDQ", 74 75 "ADCB", 76 "ADCW", 77 "ADCL", 78 "ADCQ", 79 80 "SUBB", 81 "SUBW", 82 "SUBL", 83 "SUBQ", 84 85 "SBBB", 86 "SBBW", 87 "SBBL", 88 "SBBQ", 89 90 "LOGICB", 91 "LOGICW", 92 "LOGICL", 93 "LOGICQ", 94 95 "INCB", 96 "INCW", 97 "INCL", 98 "INCQ", 99 100 "DECB", 101 "DECW", 102 "DECL", 103 "DECQ", 104 105 "SHLB", 106 "SHLW", 107 "SHLL", 108 "SHLQ", 109 110 "SARB", 111 "SARW", 112 "SARL", 113 "SARQ", 114 115 "BMILGB", 116 "BMILGW", 117 "BMILGL", 118 "BMILGQ", 119 120 "ADCX", 121 "ADOX", 122 "ADCOX", 123 124 "CLR", 125 }; 126 127 static void 128 cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf, 129 const char *name, struct SegmentCache *sc) 130 { 131 #ifdef TARGET_X86_64 132 if (env->hflags & HF_CS64_MASK) { 133 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name, 134 sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00); 135 } else 136 #endif 137 { 138 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector, 139 (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00); 140 } 141 142 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK)) 143 goto done; 144 145 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT); 146 if (sc->flags & DESC_S_MASK) { 147 if (sc->flags & DESC_CS_MASK) { 148 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" : 149 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16")); 150 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-', 151 (sc->flags & DESC_R_MASK) ? 'R' : '-'); 152 } else { 153 cpu_fprintf(f, 154 (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK) 155 ? "DS " : "DS16"); 156 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-', 157 (sc->flags & DESC_W_MASK) ? 'W' : '-'); 158 } 159 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-'); 160 } else { 161 static const char *sys_type_name[2][16] = { 162 { /* 32 bit mode */ 163 "Reserved", "TSS16-avl", "LDT", "TSS16-busy", 164 "CallGate16", "TaskGate", "IntGate16", "TrapGate16", 165 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy", 166 "CallGate32", "Reserved", "IntGate32", "TrapGate32" 167 }, 168 { /* 64 bit mode */ 169 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved", 170 "Reserved", "Reserved", "Reserved", "Reserved", 171 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64", 172 "Reserved", "IntGate64", "TrapGate64" 173 } 174 }; 175 cpu_fprintf(f, "%s", 176 sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0] 177 [(sc->flags & DESC_TYPE_MASK) 178 >> DESC_TYPE_SHIFT]); 179 } 180 done: 181 cpu_fprintf(f, "\n"); 182 } 183 184 #ifndef CONFIG_USER_ONLY 185 186 /* ARRAY_SIZE check is not required because 187 * DeliveryMode(dm) has a size of 3 bit. 188 */ 189 static inline const char *dm2str(uint32_t dm) 190 { 191 static const char *str[] = { 192 "Fixed", 193 "...", 194 "SMI", 195 "...", 196 "NMI", 197 "INIT", 198 "...", 199 "ExtINT" 200 }; 201 return str[dm]; 202 } 203 204 static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf, 205 const char *name, uint32_t lvt, bool is_timer) 206 { 207 uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT; 208 cpu_fprintf(f, 209 "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s", 210 name, lvt, 211 lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi", 212 lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge", 213 lvt & APIC_LVT_MASKED ? "masked" : "", 214 lvt & APIC_LVT_DELIV_STS ? "pending" : "", 215 !is_timer ? 216 "" : lvt & APIC_LVT_TIMER_PERIODIC ? 217 "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ? 218 "tsc-deadline" : "one-shot", 219 dm2str(dm)); 220 if (dm != APIC_DM_NMI) { 221 cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK); 222 } else { 223 cpu_fprintf(f, "\n"); 224 } 225 } 226 227 /* ARRAY_SIZE check is not required because 228 * destination shorthand has a size of 2 bit. 229 */ 230 static inline const char *shorthand2str(uint32_t shorthand) 231 { 232 const char *str[] = { 233 "no-shorthand", "self", "all-self", "all" 234 }; 235 return str[shorthand]; 236 } 237 238 static inline uint8_t divider_conf(uint32_t divide_conf) 239 { 240 uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3); 241 242 return divide_val == 7 ? 1 : 2 << divide_val; 243 } 244 245 static inline void mask2str(char *str, uint32_t val, uint8_t size) 246 { 247 while (size--) { 248 *str++ = (val >> size) & 1 ? '1' : '0'; 249 } 250 *str = 0; 251 } 252 253 #define MAX_LOGICAL_APIC_ID_MASK_SIZE 16 254 255 static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf, 256 APICCommonState *s, CPUX86State *env) 257 { 258 uint32_t icr = s->icr[0], icr2 = s->icr[1]; 259 uint8_t dest_shorthand = \ 260 (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT; 261 bool logical_mod = icr & APIC_ICR_DEST_MOD; 262 char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1]; 263 uint32_t dest_field; 264 bool x2apic; 265 266 cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n", 267 icr, 268 logical_mod ? "logical" : "physical", 269 icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge", 270 icr & APIC_ICR_LEVEL ? "assert" : "de-assert", 271 shorthand2str(dest_shorthand)); 272 273 cpu_fprintf(f, "ICR2\t 0x%08x", icr2); 274 if (dest_shorthand != 0) { 275 cpu_fprintf(f, "\n"); 276 return; 277 } 278 x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC; 279 dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT; 280 281 if (!logical_mod) { 282 if (x2apic) { 283 cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field); 284 } else { 285 cpu_fprintf(f, " cpu %u (APIC ID)\n", 286 dest_field & APIC_LOGDEST_XAPIC_ID); 287 } 288 return; 289 } 290 291 if (s->dest_mode == 0xf) { /* flat mode */ 292 mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8); 293 cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str); 294 } else if (s->dest_mode == 0) { /* cluster mode */ 295 if (x2apic) { 296 mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16); 297 cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n", 298 dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str); 299 } else { 300 mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4); 301 cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n", 302 dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str); 303 } 304 } 305 } 306 307 static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf, 308 const char *name, uint32_t *ireg_tab, 309 uint32_t *tmr_tab) 310 { 311 int i, empty = true; 312 313 cpu_fprintf(f, "%s\t ", name); 314 for (i = 0; i < 256; i++) { 315 if (apic_get_bit(ireg_tab, i)) { 316 cpu_fprintf(f, "%u%s ", i, 317 apic_get_bit(tmr_tab, i) ? "(level)" : ""); 318 empty = false; 319 } 320 } 321 cpu_fprintf(f, "%s\n", empty ? "(none)" : ""); 322 } 323 324 void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f, 325 fprintf_function cpu_fprintf, int flags) 326 { 327 X86CPU *cpu = X86_CPU(cs); 328 APICCommonState *s = APIC_COMMON(cpu->apic_state); 329 uint32_t *lvt = s->lvt; 330 331 cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n", 332 CPU(cpu)->cpu_index); 333 dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false); 334 dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false); 335 dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false); 336 dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false); 337 dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false); 338 dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true); 339 340 cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n", 341 s->divide_conf & APIC_DCR_MASK, 342 divider_conf(s->divide_conf), 343 s->initial_count); 344 345 cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n", 346 s->spurious_vec, 347 s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled", 348 s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off", 349 s->spurious_vec & APIC_VECTOR_MASK); 350 351 dump_apic_icr(f, cpu_fprintf, s, &cpu->env); 352 353 cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr); 354 355 dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr); 356 dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr); 357 358 cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x", 359 s->arb_id, s->tpr, s->dest_mode, s->log_dest); 360 if (s->dest_mode == 0) { 361 cpu_fprintf(f, "(cluster %u: id %u)", 362 s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT, 363 s->log_dest & APIC_LOGDEST_XAPIC_ID); 364 } 365 cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s)); 366 } 367 #else 368 void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f, 369 fprintf_function cpu_fprintf, int flags) 370 { 371 } 372 #endif /* !CONFIG_USER_ONLY */ 373 374 #define DUMP_CODE_BYTES_TOTAL 50 375 #define DUMP_CODE_BYTES_BACKWARD 20 376 377 void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, 378 int flags) 379 { 380 X86CPU *cpu = X86_CPU(cs); 381 CPUX86State *env = &cpu->env; 382 int eflags, i, nb; 383 char cc_op_name[32]; 384 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; 385 386 eflags = cpu_compute_eflags(env); 387 #ifdef TARGET_X86_64 388 if (env->hflags & HF_CS64_MASK) { 389 cpu_fprintf(f, 390 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n" 391 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n" 392 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n" 393 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n" 394 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", 395 env->regs[R_EAX], 396 env->regs[R_EBX], 397 env->regs[R_ECX], 398 env->regs[R_EDX], 399 env->regs[R_ESI], 400 env->regs[R_EDI], 401 env->regs[R_EBP], 402 env->regs[R_ESP], 403 env->regs[8], 404 env->regs[9], 405 env->regs[10], 406 env->regs[11], 407 env->regs[12], 408 env->regs[13], 409 env->regs[14], 410 env->regs[15], 411 env->eip, eflags, 412 eflags & DF_MASK ? 'D' : '-', 413 eflags & CC_O ? 'O' : '-', 414 eflags & CC_S ? 'S' : '-', 415 eflags & CC_Z ? 'Z' : '-', 416 eflags & CC_A ? 'A' : '-', 417 eflags & CC_P ? 'P' : '-', 418 eflags & CC_C ? 'C' : '-', 419 env->hflags & HF_CPL_MASK, 420 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 421 (env->a20_mask >> 20) & 1, 422 (env->hflags >> HF_SMM_SHIFT) & 1, 423 cs->halted); 424 } else 425 #endif 426 { 427 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" 428 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" 429 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", 430 (uint32_t)env->regs[R_EAX], 431 (uint32_t)env->regs[R_EBX], 432 (uint32_t)env->regs[R_ECX], 433 (uint32_t)env->regs[R_EDX], 434 (uint32_t)env->regs[R_ESI], 435 (uint32_t)env->regs[R_EDI], 436 (uint32_t)env->regs[R_EBP], 437 (uint32_t)env->regs[R_ESP], 438 (uint32_t)env->eip, eflags, 439 eflags & DF_MASK ? 'D' : '-', 440 eflags & CC_O ? 'O' : '-', 441 eflags & CC_S ? 'S' : '-', 442 eflags & CC_Z ? 'Z' : '-', 443 eflags & CC_A ? 'A' : '-', 444 eflags & CC_P ? 'P' : '-', 445 eflags & CC_C ? 'C' : '-', 446 env->hflags & HF_CPL_MASK, 447 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 448 (env->a20_mask >> 20) & 1, 449 (env->hflags >> HF_SMM_SHIFT) & 1, 450 cs->halted); 451 } 452 453 for(i = 0; i < 6; i++) { 454 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i], 455 &env->segs[i]); 456 } 457 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt); 458 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr); 459 460 #ifdef TARGET_X86_64 461 if (env->hflags & HF_LMA_MASK) { 462 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", 463 env->gdt.base, env->gdt.limit); 464 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n", 465 env->idt.base, env->idt.limit); 466 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n", 467 (uint32_t)env->cr[0], 468 env->cr[2], 469 env->cr[3], 470 (uint32_t)env->cr[4]); 471 for(i = 0; i < 4; i++) 472 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]); 473 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n", 474 env->dr[6], env->dr[7]); 475 } else 476 #endif 477 { 478 cpu_fprintf(f, "GDT= %08x %08x\n", 479 (uint32_t)env->gdt.base, env->gdt.limit); 480 cpu_fprintf(f, "IDT= %08x %08x\n", 481 (uint32_t)env->idt.base, env->idt.limit); 482 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n", 483 (uint32_t)env->cr[0], 484 (uint32_t)env->cr[2], 485 (uint32_t)env->cr[3], 486 (uint32_t)env->cr[4]); 487 for(i = 0; i < 4; i++) { 488 cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]); 489 } 490 cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n", 491 env->dr[6], env->dr[7]); 492 } 493 if (flags & CPU_DUMP_CCOP) { 494 if ((unsigned)env->cc_op < CC_OP_NB) 495 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); 496 else 497 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); 498 #ifdef TARGET_X86_64 499 if (env->hflags & HF_CS64_MASK) { 500 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", 501 env->cc_src, env->cc_dst, 502 cc_op_name); 503 } else 504 #endif 505 { 506 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", 507 (uint32_t)env->cc_src, (uint32_t)env->cc_dst, 508 cc_op_name); 509 } 510 } 511 cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer); 512 if (flags & CPU_DUMP_FPU) { 513 int fptag; 514 fptag = 0; 515 for(i = 0; i < 8; i++) { 516 fptag |= ((!env->fptags[i]) << i); 517 } 518 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n", 519 env->fpuc, 520 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, 521 env->fpstt, 522 fptag, 523 env->mxcsr); 524 for(i=0;i<8;i++) { 525 CPU_LDoubleU u; 526 u.d = env->fpregs[i].d; 527 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", 528 i, u.l.lower, u.l.upper); 529 if ((i & 1) == 1) 530 cpu_fprintf(f, "\n"); 531 else 532 cpu_fprintf(f, " "); 533 } 534 if (env->hflags & HF_CS64_MASK) 535 nb = 16; 536 else 537 nb = 8; 538 for(i=0;i<nb;i++) { 539 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x", 540 i, 541 env->xmm_regs[i].ZMM_L(3), 542 env->xmm_regs[i].ZMM_L(2), 543 env->xmm_regs[i].ZMM_L(1), 544 env->xmm_regs[i].ZMM_L(0)); 545 if ((i & 1) == 1) 546 cpu_fprintf(f, "\n"); 547 else 548 cpu_fprintf(f, " "); 549 } 550 } 551 if (flags & CPU_DUMP_CODE) { 552 target_ulong base = env->segs[R_CS].base + env->eip; 553 target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD); 554 uint8_t code; 555 char codestr[3]; 556 557 cpu_fprintf(f, "Code="); 558 for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) { 559 if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) { 560 snprintf(codestr, sizeof(codestr), "%02x", code); 561 } else { 562 snprintf(codestr, sizeof(codestr), "??"); 563 } 564 cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "", 565 i == offs ? "<" : "", codestr, i == offs ? ">" : ""); 566 } 567 cpu_fprintf(f, "\n"); 568 } 569 } 570 571 /***********************************************************/ 572 /* x86 mmu */ 573 /* XXX: add PGE support */ 574 575 void x86_cpu_set_a20(X86CPU *cpu, int a20_state) 576 { 577 CPUX86State *env = &cpu->env; 578 579 a20_state = (a20_state != 0); 580 if (a20_state != ((env->a20_mask >> 20) & 1)) { 581 CPUState *cs = CPU(cpu); 582 583 qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state); 584 /* if the cpu is currently executing code, we must unlink it and 585 all the potentially executing TB */ 586 cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); 587 588 /* when a20 is changed, all the MMU mappings are invalid, so 589 we must flush everything */ 590 tlb_flush(cs); 591 env->a20_mask = ~(1 << 20) | (a20_state << 20); 592 } 593 } 594 595 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) 596 { 597 X86CPU *cpu = x86_env_get_cpu(env); 598 int pe_state; 599 600 qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0); 601 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != 602 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { 603 tlb_flush(CPU(cpu)); 604 } 605 606 #ifdef TARGET_X86_64 607 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && 608 (env->efer & MSR_EFER_LME)) { 609 /* enter in long mode */ 610 /* XXX: generate an exception */ 611 if (!(env->cr[4] & CR4_PAE_MASK)) 612 return; 613 env->efer |= MSR_EFER_LMA; 614 env->hflags |= HF_LMA_MASK; 615 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && 616 (env->efer & MSR_EFER_LMA)) { 617 /* exit long mode */ 618 env->efer &= ~MSR_EFER_LMA; 619 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); 620 env->eip &= 0xffffffff; 621 } 622 #endif 623 env->cr[0] = new_cr0 | CR0_ET_MASK; 624 625 /* update PE flag in hidden flags */ 626 pe_state = (env->cr[0] & CR0_PE_MASK); 627 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); 628 /* ensure that ADDSEG is always set in real mode */ 629 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); 630 /* update FPU flags */ 631 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | 632 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); 633 } 634 635 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in 636 the PDPT */ 637 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) 638 { 639 X86CPU *cpu = x86_env_get_cpu(env); 640 641 env->cr[3] = new_cr3; 642 if (env->cr[0] & CR0_PG_MASK) { 643 qemu_log_mask(CPU_LOG_MMU, 644 "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); 645 tlb_flush(CPU(cpu)); 646 } 647 } 648 649 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) 650 { 651 X86CPU *cpu = x86_env_get_cpu(env); 652 uint32_t hflags; 653 654 #if defined(DEBUG_MMU) 655 printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4); 656 #endif 657 if ((new_cr4 ^ env->cr[4]) & 658 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK | 659 CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) { 660 tlb_flush(CPU(cpu)); 661 } 662 663 /* Clear bits we're going to recompute. */ 664 hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK); 665 666 /* SSE handling */ 667 if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) { 668 new_cr4 &= ~CR4_OSFXSR_MASK; 669 } 670 if (new_cr4 & CR4_OSFXSR_MASK) { 671 hflags |= HF_OSFXSR_MASK; 672 } 673 674 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { 675 new_cr4 &= ~CR4_SMAP_MASK; 676 } 677 if (new_cr4 & CR4_SMAP_MASK) { 678 hflags |= HF_SMAP_MASK; 679 } 680 681 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { 682 new_cr4 &= ~CR4_PKE_MASK; 683 } 684 685 env->cr[4] = new_cr4; 686 env->hflags = hflags; 687 688 cpu_sync_bndcs_hflags(env); 689 } 690 691 #if defined(CONFIG_USER_ONLY) 692 693 int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, 694 int is_write, int mmu_idx) 695 { 696 X86CPU *cpu = X86_CPU(cs); 697 CPUX86State *env = &cpu->env; 698 699 /* user mode only emulation */ 700 is_write &= 1; 701 env->cr[2] = addr; 702 env->error_code = (is_write << PG_ERROR_W_BIT); 703 env->error_code |= PG_ERROR_U_MASK; 704 cs->exception_index = EXCP0E_PAGE; 705 env->exception_is_int = 0; 706 env->exception_next_eip = -1; 707 return 1; 708 } 709 710 #else 711 712 /* return value: 713 * -1 = cannot handle fault 714 * 0 = nothing more to do 715 * 1 = generate PF fault 716 */ 717 int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, 718 int is_write1, int mmu_idx) 719 { 720 X86CPU *cpu = X86_CPU(cs); 721 CPUX86State *env = &cpu->env; 722 uint64_t ptep, pte; 723 target_ulong pde_addr, pte_addr; 724 int error_code = 0; 725 int is_dirty, prot, page_size, is_write, is_user; 726 hwaddr paddr; 727 uint64_t rsvd_mask = PG_HI_RSVD_MASK; 728 uint32_t page_offset; 729 target_ulong vaddr; 730 731 is_user = mmu_idx == MMU_USER_IDX; 732 #if defined(DEBUG_MMU) 733 printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 734 addr, is_write1, is_user, env->eip); 735 #endif 736 is_write = is_write1 & 1; 737 738 if (!(env->cr[0] & CR0_PG_MASK)) { 739 pte = addr; 740 #ifdef TARGET_X86_64 741 if (!(env->hflags & HF_LMA_MASK)) { 742 /* Without long mode we can only address 32bits in real mode */ 743 pte = (uint32_t)pte; 744 } 745 #endif 746 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 747 page_size = 4096; 748 goto do_mapping; 749 } 750 751 if (!(env->efer & MSR_EFER_NXE)) { 752 rsvd_mask |= PG_NX_MASK; 753 } 754 755 if (env->cr[4] & CR4_PAE_MASK) { 756 uint64_t pde, pdpe; 757 target_ulong pdpe_addr; 758 759 #ifdef TARGET_X86_64 760 if (env->hflags & HF_LMA_MASK) { 761 bool la57 = env->cr[4] & CR4_LA57_MASK; 762 uint64_t pml5e_addr, pml5e; 763 uint64_t pml4e_addr, pml4e; 764 int32_t sext; 765 766 /* test virtual address sign extension */ 767 sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47; 768 if (sext != 0 && sext != -1) { 769 env->error_code = 0; 770 cs->exception_index = EXCP0D_GPF; 771 return 1; 772 } 773 774 if (la57) { 775 pml5e_addr = ((env->cr[3] & ~0xfff) + 776 (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask; 777 pml5e = x86_ldq_phys(cs, pml5e_addr); 778 if (!(pml5e & PG_PRESENT_MASK)) { 779 goto do_fault; 780 } 781 if (pml5e & (rsvd_mask | PG_PSE_MASK)) { 782 goto do_fault_rsvd; 783 } 784 if (!(pml5e & PG_ACCESSED_MASK)) { 785 pml5e |= PG_ACCESSED_MASK; 786 x86_stl_phys_notdirty(cs, pml5e_addr, pml5e); 787 } 788 ptep = pml5e ^ PG_NX_MASK; 789 } else { 790 pml5e = env->cr[3]; 791 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; 792 } 793 794 pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + 795 (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; 796 pml4e = x86_ldq_phys(cs, pml4e_addr); 797 if (!(pml4e & PG_PRESENT_MASK)) { 798 goto do_fault; 799 } 800 if (pml4e & (rsvd_mask | PG_PSE_MASK)) { 801 goto do_fault_rsvd; 802 } 803 if (!(pml4e & PG_ACCESSED_MASK)) { 804 pml4e |= PG_ACCESSED_MASK; 805 x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); 806 } 807 ptep &= pml4e ^ PG_NX_MASK; 808 pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & 809 env->a20_mask; 810 pdpe = x86_ldq_phys(cs, pdpe_addr); 811 if (!(pdpe & PG_PRESENT_MASK)) { 812 goto do_fault; 813 } 814 if (pdpe & rsvd_mask) { 815 goto do_fault_rsvd; 816 } 817 ptep &= pdpe ^ PG_NX_MASK; 818 if (!(pdpe & PG_ACCESSED_MASK)) { 819 pdpe |= PG_ACCESSED_MASK; 820 x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); 821 } 822 if (pdpe & PG_PSE_MASK) { 823 /* 1 GB page */ 824 page_size = 1024 * 1024 * 1024; 825 pte_addr = pdpe_addr; 826 pte = pdpe; 827 goto do_check_protect; 828 } 829 } else 830 #endif 831 { 832 /* XXX: load them when cr3 is loaded ? */ 833 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & 834 env->a20_mask; 835 pdpe = x86_ldq_phys(cs, pdpe_addr); 836 if (!(pdpe & PG_PRESENT_MASK)) { 837 goto do_fault; 838 } 839 rsvd_mask |= PG_HI_USER_MASK; 840 if (pdpe & (rsvd_mask | PG_NX_MASK)) { 841 goto do_fault_rsvd; 842 } 843 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; 844 } 845 846 pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & 847 env->a20_mask; 848 pde = x86_ldq_phys(cs, pde_addr); 849 if (!(pde & PG_PRESENT_MASK)) { 850 goto do_fault; 851 } 852 if (pde & rsvd_mask) { 853 goto do_fault_rsvd; 854 } 855 ptep &= pde ^ PG_NX_MASK; 856 if (pde & PG_PSE_MASK) { 857 /* 2 MB page */ 858 page_size = 2048 * 1024; 859 pte_addr = pde_addr; 860 pte = pde; 861 goto do_check_protect; 862 } 863 /* 4 KB page */ 864 if (!(pde & PG_ACCESSED_MASK)) { 865 pde |= PG_ACCESSED_MASK; 866 x86_stl_phys_notdirty(cs, pde_addr, pde); 867 } 868 pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & 869 env->a20_mask; 870 pte = x86_ldq_phys(cs, pte_addr); 871 if (!(pte & PG_PRESENT_MASK)) { 872 goto do_fault; 873 } 874 if (pte & rsvd_mask) { 875 goto do_fault_rsvd; 876 } 877 /* combine pde and pte nx, user and rw protections */ 878 ptep &= pte ^ PG_NX_MASK; 879 page_size = 4096; 880 } else { 881 uint32_t pde; 882 883 /* page directory entry */ 884 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & 885 env->a20_mask; 886 pde = x86_ldl_phys(cs, pde_addr); 887 if (!(pde & PG_PRESENT_MASK)) { 888 goto do_fault; 889 } 890 ptep = pde | PG_NX_MASK; 891 892 /* if PSE bit is set, then we use a 4MB page */ 893 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { 894 page_size = 4096 * 1024; 895 pte_addr = pde_addr; 896 897 /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. 898 * Leave bits 20-13 in place for setting accessed/dirty bits below. 899 */ 900 pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); 901 rsvd_mask = 0x200000; 902 goto do_check_protect_pse36; 903 } 904 905 if (!(pde & PG_ACCESSED_MASK)) { 906 pde |= PG_ACCESSED_MASK; 907 x86_stl_phys_notdirty(cs, pde_addr, pde); 908 } 909 910 /* page directory entry */ 911 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 912 env->a20_mask; 913 pte = x86_ldl_phys(cs, pte_addr); 914 if (!(pte & PG_PRESENT_MASK)) { 915 goto do_fault; 916 } 917 /* combine pde and pte user and rw protections */ 918 ptep &= pte | PG_NX_MASK; 919 page_size = 4096; 920 rsvd_mask = 0; 921 } 922 923 do_check_protect: 924 rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; 925 do_check_protect_pse36: 926 if (pte & rsvd_mask) { 927 goto do_fault_rsvd; 928 } 929 ptep ^= PG_NX_MASK; 930 931 /* can the page can be put in the TLB? prot will tell us */ 932 if (is_user && !(ptep & PG_USER_MASK)) { 933 goto do_fault_protect; 934 } 935 936 prot = 0; 937 if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) { 938 prot |= PAGE_READ; 939 if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) { 940 prot |= PAGE_WRITE; 941 } 942 } 943 if (!(ptep & PG_NX_MASK) && 944 (mmu_idx == MMU_USER_IDX || 945 !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) { 946 prot |= PAGE_EXEC; 947 } 948 if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) && 949 (ptep & PG_USER_MASK) && env->pkru) { 950 uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT; 951 uint32_t pkru_ad = (env->pkru >> pk * 2) & 1; 952 uint32_t pkru_wd = (env->pkru >> pk * 2) & 2; 953 uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 954 955 if (pkru_ad) { 956 pkru_prot &= ~(PAGE_READ | PAGE_WRITE); 957 } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) { 958 pkru_prot &= ~PAGE_WRITE; 959 } 960 961 prot &= pkru_prot; 962 if ((pkru_prot & (1 << is_write1)) == 0) { 963 assert(is_write1 != 2); 964 error_code |= PG_ERROR_PK_MASK; 965 goto do_fault_protect; 966 } 967 } 968 969 if ((prot & (1 << is_write1)) == 0) { 970 goto do_fault_protect; 971 } 972 973 /* yes, it can! */ 974 is_dirty = is_write && !(pte & PG_DIRTY_MASK); 975 if (!(pte & PG_ACCESSED_MASK) || is_dirty) { 976 pte |= PG_ACCESSED_MASK; 977 if (is_dirty) { 978 pte |= PG_DIRTY_MASK; 979 } 980 x86_stl_phys_notdirty(cs, pte_addr, pte); 981 } 982 983 if (!(pte & PG_DIRTY_MASK)) { 984 /* only set write access if already dirty... otherwise wait 985 for dirty access */ 986 assert(!is_write); 987 prot &= ~PAGE_WRITE; 988 } 989 990 do_mapping: 991 pte = pte & env->a20_mask; 992 993 /* align to page_size */ 994 pte &= PG_ADDRESS_MASK & ~(page_size - 1); 995 996 /* Even if 4MB pages, we map only one 4KB page in the cache to 997 avoid filling it too fast */ 998 vaddr = addr & TARGET_PAGE_MASK; 999 page_offset = vaddr & (page_size - 1); 1000 paddr = pte + page_offset; 1001 1002 assert(prot & (1 << is_write1)); 1003 tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env), 1004 prot, mmu_idx, page_size); 1005 return 0; 1006 do_fault_rsvd: 1007 error_code |= PG_ERROR_RSVD_MASK; 1008 do_fault_protect: 1009 error_code |= PG_ERROR_P_MASK; 1010 do_fault: 1011 error_code |= (is_write << PG_ERROR_W_BIT); 1012 if (is_user) 1013 error_code |= PG_ERROR_U_MASK; 1014 if (is_write1 == 2 && 1015 (((env->efer & MSR_EFER_NXE) && 1016 (env->cr[4] & CR4_PAE_MASK)) || 1017 (env->cr[4] & CR4_SMEP_MASK))) 1018 error_code |= PG_ERROR_I_D_MASK; 1019 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { 1020 /* cr2 is not modified in case of exceptions */ 1021 x86_stq_phys(cs, 1022 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 1023 addr); 1024 } else { 1025 env->cr[2] = addr; 1026 } 1027 env->error_code = error_code; 1028 cs->exception_index = EXCP0E_PAGE; 1029 return 1; 1030 } 1031 1032 hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1033 { 1034 X86CPU *cpu = X86_CPU(cs); 1035 CPUX86State *env = &cpu->env; 1036 target_ulong pde_addr, pte_addr; 1037 uint64_t pte; 1038 uint32_t page_offset; 1039 int page_size; 1040 1041 if (!(env->cr[0] & CR0_PG_MASK)) { 1042 pte = addr & env->a20_mask; 1043 page_size = 4096; 1044 } else if (env->cr[4] & CR4_PAE_MASK) { 1045 target_ulong pdpe_addr; 1046 uint64_t pde, pdpe; 1047 1048 #ifdef TARGET_X86_64 1049 if (env->hflags & HF_LMA_MASK) { 1050 bool la57 = env->cr[4] & CR4_LA57_MASK; 1051 uint64_t pml5e_addr, pml5e; 1052 uint64_t pml4e_addr, pml4e; 1053 int32_t sext; 1054 1055 /* test virtual address sign extension */ 1056 sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47; 1057 if (sext != 0 && sext != -1) { 1058 return -1; 1059 } 1060 1061 if (la57) { 1062 pml5e_addr = ((env->cr[3] & ~0xfff) + 1063 (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask; 1064 pml5e = x86_ldq_phys(cs, pml5e_addr); 1065 if (!(pml5e & PG_PRESENT_MASK)) { 1066 return -1; 1067 } 1068 } else { 1069 pml5e = env->cr[3]; 1070 } 1071 1072 pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + 1073 (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; 1074 pml4e = x86_ldq_phys(cs, pml4e_addr); 1075 if (!(pml4e & PG_PRESENT_MASK)) { 1076 return -1; 1077 } 1078 pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + 1079 (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; 1080 pdpe = x86_ldq_phys(cs, pdpe_addr); 1081 if (!(pdpe & PG_PRESENT_MASK)) { 1082 return -1; 1083 } 1084 if (pdpe & PG_PSE_MASK) { 1085 page_size = 1024 * 1024 * 1024; 1086 pte = pdpe; 1087 goto out; 1088 } 1089 1090 } else 1091 #endif 1092 { 1093 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & 1094 env->a20_mask; 1095 pdpe = x86_ldq_phys(cs, pdpe_addr); 1096 if (!(pdpe & PG_PRESENT_MASK)) 1097 return -1; 1098 } 1099 1100 pde_addr = ((pdpe & PG_ADDRESS_MASK) + 1101 (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; 1102 pde = x86_ldq_phys(cs, pde_addr); 1103 if (!(pde & PG_PRESENT_MASK)) { 1104 return -1; 1105 } 1106 if (pde & PG_PSE_MASK) { 1107 /* 2 MB page */ 1108 page_size = 2048 * 1024; 1109 pte = pde; 1110 } else { 1111 /* 4 KB page */ 1112 pte_addr = ((pde & PG_ADDRESS_MASK) + 1113 (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; 1114 page_size = 4096; 1115 pte = x86_ldq_phys(cs, pte_addr); 1116 } 1117 if (!(pte & PG_PRESENT_MASK)) { 1118 return -1; 1119 } 1120 } else { 1121 uint32_t pde; 1122 1123 /* page directory entry */ 1124 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask; 1125 pde = x86_ldl_phys(cs, pde_addr); 1126 if (!(pde & PG_PRESENT_MASK)) 1127 return -1; 1128 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { 1129 pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); 1130 page_size = 4096 * 1024; 1131 } else { 1132 /* page directory entry */ 1133 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; 1134 pte = x86_ldl_phys(cs, pte_addr); 1135 if (!(pte & PG_PRESENT_MASK)) { 1136 return -1; 1137 } 1138 page_size = 4096; 1139 } 1140 pte = pte & env->a20_mask; 1141 } 1142 1143 #ifdef TARGET_X86_64 1144 out: 1145 #endif 1146 pte &= PG_ADDRESS_MASK & ~(page_size - 1); 1147 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); 1148 return pte | page_offset; 1149 } 1150 1151 typedef struct MCEInjectionParams { 1152 Monitor *mon; 1153 int bank; 1154 uint64_t status; 1155 uint64_t mcg_status; 1156 uint64_t addr; 1157 uint64_t misc; 1158 int flags; 1159 } MCEInjectionParams; 1160 1161 static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data) 1162 { 1163 MCEInjectionParams *params = data.host_ptr; 1164 X86CPU *cpu = X86_CPU(cs); 1165 CPUX86State *cenv = &cpu->env; 1166 uint64_t *banks = cenv->mce_banks + 4 * params->bank; 1167 1168 cpu_synchronize_state(cs); 1169 1170 /* 1171 * If there is an MCE exception being processed, ignore this SRAO MCE 1172 * unless unconditional injection was requested. 1173 */ 1174 if (!(params->flags & MCE_INJECT_UNCOND_AO) 1175 && !(params->status & MCI_STATUS_AR) 1176 && (cenv->mcg_status & MCG_STATUS_MCIP)) { 1177 return; 1178 } 1179 1180 if (params->status & MCI_STATUS_UC) { 1181 /* 1182 * if MSR_MCG_CTL is not all 1s, the uncorrected error 1183 * reporting is disabled 1184 */ 1185 if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) { 1186 monitor_printf(params->mon, 1187 "CPU %d: Uncorrected error reporting disabled\n", 1188 cs->cpu_index); 1189 return; 1190 } 1191 1192 /* 1193 * if MSR_MCi_CTL is not all 1s, the uncorrected error 1194 * reporting is disabled for the bank 1195 */ 1196 if (banks[0] != ~(uint64_t)0) { 1197 monitor_printf(params->mon, 1198 "CPU %d: Uncorrected error reporting disabled for" 1199 " bank %d\n", 1200 cs->cpu_index, params->bank); 1201 return; 1202 } 1203 1204 if ((cenv->mcg_status & MCG_STATUS_MCIP) || 1205 !(cenv->cr[4] & CR4_MCE_MASK)) { 1206 monitor_printf(params->mon, 1207 "CPU %d: Previous MCE still in progress, raising" 1208 " triple fault\n", 1209 cs->cpu_index); 1210 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); 1211 qemu_system_reset_request(); 1212 return; 1213 } 1214 if (banks[1] & MCI_STATUS_VAL) { 1215 params->status |= MCI_STATUS_OVER; 1216 } 1217 banks[2] = params->addr; 1218 banks[3] = params->misc; 1219 cenv->mcg_status = params->mcg_status; 1220 banks[1] = params->status; 1221 cpu_interrupt(cs, CPU_INTERRUPT_MCE); 1222 } else if (!(banks[1] & MCI_STATUS_VAL) 1223 || !(banks[1] & MCI_STATUS_UC)) { 1224 if (banks[1] & MCI_STATUS_VAL) { 1225 params->status |= MCI_STATUS_OVER; 1226 } 1227 banks[2] = params->addr; 1228 banks[3] = params->misc; 1229 banks[1] = params->status; 1230 } else { 1231 banks[1] |= MCI_STATUS_OVER; 1232 } 1233 } 1234 1235 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, 1236 uint64_t status, uint64_t mcg_status, uint64_t addr, 1237 uint64_t misc, int flags) 1238 { 1239 CPUState *cs = CPU(cpu); 1240 CPUX86State *cenv = &cpu->env; 1241 MCEInjectionParams params = { 1242 .mon = mon, 1243 .bank = bank, 1244 .status = status, 1245 .mcg_status = mcg_status, 1246 .addr = addr, 1247 .misc = misc, 1248 .flags = flags, 1249 }; 1250 unsigned bank_num = cenv->mcg_cap & 0xff; 1251 1252 if (!cenv->mcg_cap) { 1253 monitor_printf(mon, "MCE injection not supported\n"); 1254 return; 1255 } 1256 if (bank >= bank_num) { 1257 monitor_printf(mon, "Invalid MCE bank number\n"); 1258 return; 1259 } 1260 if (!(status & MCI_STATUS_VAL)) { 1261 monitor_printf(mon, "Invalid MCE status code\n"); 1262 return; 1263 } 1264 if ((flags & MCE_INJECT_BROADCAST) 1265 && !cpu_x86_support_mca_broadcast(cenv)) { 1266 monitor_printf(mon, "Guest CPU does not support MCA broadcast\n"); 1267 return; 1268 } 1269 1270 run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(¶ms)); 1271 if (flags & MCE_INJECT_BROADCAST) { 1272 CPUState *other_cs; 1273 1274 params.bank = 1; 1275 params.status = MCI_STATUS_VAL | MCI_STATUS_UC; 1276 params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV; 1277 params.addr = 0; 1278 params.misc = 0; 1279 CPU_FOREACH(other_cs) { 1280 if (other_cs == cs) { 1281 continue; 1282 } 1283 run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(¶ms)); 1284 } 1285 } 1286 } 1287 1288 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) 1289 { 1290 X86CPU *cpu = x86_env_get_cpu(env); 1291 CPUState *cs = CPU(cpu); 1292 1293 if (kvm_enabled()) { 1294 env->tpr_access_type = access; 1295 1296 cpu_interrupt(cs, CPU_INTERRUPT_TPR); 1297 } else { 1298 cpu_restore_state(cs, cs->mem_io_pc); 1299 1300 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access); 1301 } 1302 } 1303 #endif /* !CONFIG_USER_ONLY */ 1304 1305 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 1306 target_ulong *base, unsigned int *limit, 1307 unsigned int *flags) 1308 { 1309 X86CPU *cpu = x86_env_get_cpu(env); 1310 CPUState *cs = CPU(cpu); 1311 SegmentCache *dt; 1312 target_ulong ptr; 1313 uint32_t e1, e2; 1314 int index; 1315 1316 if (selector & 0x4) 1317 dt = &env->ldt; 1318 else 1319 dt = &env->gdt; 1320 index = selector & ~7; 1321 ptr = dt->base + index; 1322 if ((index + 7) > dt->limit 1323 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0 1324 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0) 1325 return 0; 1326 1327 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); 1328 *limit = (e1 & 0xffff) | (e2 & 0x000f0000); 1329 if (e2 & DESC_G_MASK) 1330 *limit = (*limit << 12) | 0xfff; 1331 *flags = e2; 1332 1333 return 1; 1334 } 1335 1336 #if !defined(CONFIG_USER_ONLY) 1337 void do_cpu_init(X86CPU *cpu) 1338 { 1339 CPUState *cs = CPU(cpu); 1340 CPUX86State *env = &cpu->env; 1341 CPUX86State *save = g_new(CPUX86State, 1); 1342 int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI; 1343 1344 *save = *env; 1345 1346 cpu_reset(cs); 1347 cs->interrupt_request = sipi; 1348 memcpy(&env->start_init_save, &save->start_init_save, 1349 offsetof(CPUX86State, end_init_save) - 1350 offsetof(CPUX86State, start_init_save)); 1351 g_free(save); 1352 1353 if (kvm_enabled()) { 1354 kvm_arch_do_init_vcpu(cpu); 1355 } 1356 apic_init_reset(cpu->apic_state); 1357 } 1358 1359 void do_cpu_sipi(X86CPU *cpu) 1360 { 1361 apic_sipi(cpu->apic_state); 1362 } 1363 #else 1364 void do_cpu_init(X86CPU *cpu) 1365 { 1366 } 1367 void do_cpu_sipi(X86CPU *cpu) 1368 { 1369 } 1370 #endif 1371 1372 /* Frob eflags into and out of the CPU temporary format. */ 1373 1374 void x86_cpu_exec_enter(CPUState *cs) 1375 { 1376 X86CPU *cpu = X86_CPU(cs); 1377 CPUX86State *env = &cpu->env; 1378 1379 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 1380 env->df = 1 - (2 * ((env->eflags >> 10) & 1)); 1381 CC_OP = CC_OP_EFLAGS; 1382 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 1383 } 1384 1385 void x86_cpu_exec_exit(CPUState *cs) 1386 { 1387 X86CPU *cpu = X86_CPU(cs); 1388 CPUX86State *env = &cpu->env; 1389 1390 env->eflags = cpu_compute_eflags(env); 1391 } 1392 1393 #ifndef CONFIG_USER_ONLY 1394 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr) 1395 { 1396 X86CPU *cpu = X86_CPU(cs); 1397 CPUX86State *env = &cpu->env; 1398 1399 return address_space_ldub(cs->as, addr, 1400 cpu_get_mem_attrs(env), 1401 NULL); 1402 } 1403 1404 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr) 1405 { 1406 X86CPU *cpu = X86_CPU(cs); 1407 CPUX86State *env = &cpu->env; 1408 1409 return address_space_lduw(cs->as, addr, 1410 cpu_get_mem_attrs(env), 1411 NULL); 1412 } 1413 1414 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr) 1415 { 1416 X86CPU *cpu = X86_CPU(cs); 1417 CPUX86State *env = &cpu->env; 1418 1419 return address_space_ldl(cs->as, addr, 1420 cpu_get_mem_attrs(env), 1421 NULL); 1422 } 1423 1424 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr) 1425 { 1426 X86CPU *cpu = X86_CPU(cs); 1427 CPUX86State *env = &cpu->env; 1428 1429 return address_space_ldq(cs->as, addr, 1430 cpu_get_mem_attrs(env), 1431 NULL); 1432 } 1433 1434 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val) 1435 { 1436 X86CPU *cpu = X86_CPU(cs); 1437 CPUX86State *env = &cpu->env; 1438 1439 address_space_stb(cs->as, addr, val, 1440 cpu_get_mem_attrs(env), 1441 NULL); 1442 } 1443 1444 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val) 1445 { 1446 X86CPU *cpu = X86_CPU(cs); 1447 CPUX86State *env = &cpu->env; 1448 1449 address_space_stl_notdirty(cs->as, addr, val, 1450 cpu_get_mem_attrs(env), 1451 NULL); 1452 } 1453 1454 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val) 1455 { 1456 X86CPU *cpu = X86_CPU(cs); 1457 CPUX86State *env = &cpu->env; 1458 1459 address_space_stw(cs->as, addr, val, 1460 cpu_get_mem_attrs(env), 1461 NULL); 1462 } 1463 1464 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val) 1465 { 1466 X86CPU *cpu = X86_CPU(cs); 1467 CPUX86State *env = &cpu->env; 1468 1469 address_space_stl(cs->as, addr, val, 1470 cpu_get_mem_attrs(env), 1471 NULL); 1472 } 1473 1474 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val) 1475 { 1476 X86CPU *cpu = X86_CPU(cs); 1477 CPUX86State *env = &cpu->env; 1478 1479 address_space_stq(cs->as, addr, val, 1480 cpu_get_mem_attrs(env), 1481 NULL); 1482 } 1483 #endif 1484