/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "system/runstate.h"
#ifndef CONFIG_USER_ONLY
#include "system/hw_accel.h"
#include "system/memory.h"
#include "monitor/monitor.h"
#include "kvm/kvm_i386.h"
#endif
#include "qemu/log.h"
#ifdef CONFIG_TCG
#include "tcg/insn-start-words.h"
#endif

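/*
 * Recompute the HF_AVX_EN hflag: it is set only when CR4.OSXSAVE is
 * enabled and both the SSE and YMM state components are enabled in XCR0.
 */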
void cpu_sync_avx_hflag(CPUX86State *env)
{
    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & (XSTATE_SSE_MASK | XSTATE_YMM_MASK))
            == (XSTATE_SSE_MASK | XSTATE_YMM_MASK)) {
        env->hflags |= HF_AVX_EN_MASK;
    } else {
        env->hflags &= ~HF_AVX_EN_MASK;
    }
}

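/*
 * Recompute the MPX-related hflags from the active BNDCFG register
 * (BNDCFGU when running at CPL 3, MSR_BNDCFGS otherwise): HF_MPX_EN is
 * set when OSXSAVE, the BNDCSR state component and BNDCFG.EN are all
 * enabled, and HF2_MPX_PR mirrors BNDCFG.BNDPRESERVE.
 */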
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

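/*
 * Extract the family and model from CPUID.01H:EAX (env->cpuid_version):
 * family is the base family field (bits 11:8); model combines the
 * extended model (bits 19:16) as the high nibble with the base model
 * (bits 7:4) as the low nibble.
 */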
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    if (IS_AMD_CPU(env)) {
        return 0;
    }

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

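/*
 * Update the state of the A20 gate.  A change invalidates every cached
 * translation, so executing TBs are unlinked and the TLB is flushed
 * before the new address mask takes effect.
 */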
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

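/*
 * Install a new CR0 value.  The TLB is flushed whenever PG, WP or PE
 * changes, long mode is entered or left when PG is toggled with
 * EFER.LME/LMA set, and the PE, ADDSEG, MP, EM and TS hflags are
 * recomputed from the new value.
 */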
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                        "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}

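/*
 * Install a new CR4 value.  Bits that depend on CPUID features the
 * guest does not have are silently cleared, the TLB is flushed when a
 * paging-related bit changes, and the OSFXSR, SMAP and UMIP hflags are
 * recomputed along with the MPX and AVX hflags.
 */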
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK | HF_UMIP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
        new_cr4 &= ~CR4_UMIP_MASK;
    }
    if (new_cr4 & CR4_UMIP_MASK) {
        hflags |= HF_UMIP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
        new_cr4 &= ~CR4_PKS_MASK;
    }

    if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) {
        new_cr4 &= ~CR4_LAM_SUP_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
    cpu_sync_avx_hflag(env);
}

#if !defined(CONFIG_USER_ONLY)
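/*
 * Debug translation of a guest virtual address to a guest physical
 * address.  The page tables are walked by hand (2-level, PAE, or
 * 4/5-level long mode depending on CR0/CR4/EFER), ignoring access
 * rights; returns -1 if no valid mapping exists.
 */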
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                    (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
 out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

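/* Parameters passed from cpu_x86_inject_mce() to do_inject_x86_mce(). */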
typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
                                      bool recursive)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
                                   &mff);
}

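/*
 * Inject a machine check into the current CPU (run via run_on_cpu()).
 * Uncorrected errors raise CPU_INTERRUPT_MCE, or request a system reset
 * when MCE is not enabled in CR4 or another MCE is still in progress;
 * corrected errors only update the selected MCE bank registers.
 */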
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        } else if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_printf(params->mon, "%s", msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}

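/*
 * Validate an MCE injection request from the monitor and run it on the
 * target CPU; with MCE_INJECT_BROADCAST the MCE is also broadcast to all
 * other CPUs as an uncorrected error in bank 1.
 */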
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

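/*
 * Recover the guest EIP of the instruction that performed the current
 * MMIO access by unwinding the TCG translation state for cs->mem_io_pc,
 * mirroring the logic in x86_restore_state_to_opc().
 */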
static inline target_ulong get_memio_eip(CPUX86State *env)
{
#ifdef CONFIG_TCG
    uint64_t data[INSN_START_WORDS];
    CPUState *cs = env_cpu(env);

    if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
        return env->eip;
    }

    /* Per x86_restore_state_to_opc. */
    if (tcg_cflags_has(cs, CF_PCREL)) {
        return (env->eip & TARGET_PAGE_MASK) | data[0];
    } else {
        return data[0] - env->segs[R_CS].base;
    }
#else
    qemu_build_not_reached();
#endif
}

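/*
 * Report a guest access to the local APIC TPR.  Hardware accelerators
 * record the access type and raise CPU_INTERRUPT_TPR; under TCG the
 * access is reported to the APIC immediately with the faulting EIP.
 */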
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        target_ulong eip = get_memio_eip(env);

        apic_handle_tpr_access_report(cpu->apic_state, eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

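/*
 * Fetch the descriptor referenced by a segment selector from the GDT or
 * LDT (via cpu_memory_rw_debug, for the debugger) and decode its base,
 * limit and flags.  Returns 1 on success, 0 if the selector is outside
 * the table limit or the descriptor cannot be read.
 */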
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

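/*
 * Handle an INIT signal: reset the CPU while preserving the state that
 * survives INIT (the start_init_save..end_init_save range of CPUX86State)
 * and any pending SIPI, then reset the local APIC.
 */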
void do_cpu_init(X86CPU *cpu)
{
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
#endif /* CONFIG_USER_ONLY */
}

#ifndef CONFIG_USER_ONLY

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    if (env->hflags & HF_SMM_MASK) {
        return;
    }
    apic_sipi(cpu->apic_state);
}

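/* Load a new EFER value and keep the LMA and SVME hflags in sync with it. */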
void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

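/*
 * Physical memory accessors that apply the CPU's current memory
 * transaction attributes and select the matching address space, so that
 * e.g. accesses made while in SMM go to the SMM address space.
 */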
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif