xref: /openbmc/qemu/target/i386/helper.c (revision b2d86f1c5429979d9ecaf43a7973cc129da1b135)
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "system/runstate.h"
#ifndef CONFIG_USER_ONLY
#include "system/hw_accel.h"
#include "system/memory.h"
#include "monitor/monitor.h"
#include "kvm/kvm_i386.h"
#endif
#include "qemu/log.h"
#ifdef CONFIG_TCG
#include "tcg/insn-start-words.h"
#endif

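/*
 * Recompute the HF_AVX_EN hflag: AVX instructions are usable only when
 * CR4.OSXSAVE is set and XCR0 enables both the SSE and YMM state
 * components.
 */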
void cpu_sync_avx_hflag(CPUX86State *env)
{
    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & (XSTATE_SSE_MASK | XSTATE_YMM_MASK))
            == (XSTATE_SSE_MASK | XSTATE_YMM_MASK)) {
        env->hflags |= HF_AVX_EN_MASK;
    } else {
        env->hflags &= ~HF_AVX_EN_MASK;
    }
}

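/*
 * Recompute the MPX-related hflags.  The active bound configuration comes
 * from BNDCFGU at CPL 3 and from MSR_IA32_BNDCFGS otherwise; MPX is enabled
 * only when CR4.OSXSAVE, XCR0.BNDCSR and the ENABLE bit are all set.
 */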
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

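/*
 * Decode family and model from the CPUID version dword.  The model folds
 * in the extended-model bits, while the family field is taken as-is
 * (extended family is not added).
 */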
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor family 06H, model 0EH (06_0EH)
   and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    if (IS_AMD_CPU(env)) {
        return 0;
    }

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

#ifndef CONFIG_USER_ONLY
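/*
 * Update the A20 gate.  Toggling A20 changes the physical address space
 * seen by the CPU, so any cached translations (TBs and TLB entries) must
 * be discarded.
 */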
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
#endif

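/*
 * Install a new CR0 value.  Changes to PG, WP or PE require a full TLB
 * flush, and transitions of CR0.PG while EFER.LME is set enter or leave
 * long mode.
 */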
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}

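/*
 * Install a new CR4 value.  Bits that depend on CPUID features the guest
 * does not have are silently cleared, and the cached hflags that mirror
 * CR4 (OSFXSR, SMAP, UMIP, MPX, AVX) are recomputed.
 */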
186 
187 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
188 {
189     uint32_t hflags;
190 
191 #if defined(DEBUG_MMU)
192     printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
193 #endif
194     if ((new_cr4 ^ env->cr[4]) &
195         (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
196          CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
197         tlb_flush(env_cpu(env));
198     }
199 
200     /* Clear bits we're going to recompute.  */
201     hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK | HF_UMIP_MASK);
202 
203     /* SSE handling */
204     if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
205         new_cr4 &= ~CR4_OSFXSR_MASK;
206     }
207     if (new_cr4 & CR4_OSFXSR_MASK) {
208         hflags |= HF_OSFXSR_MASK;
209     }
210 
211     if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
212         new_cr4 &= ~CR4_SMAP_MASK;
213     }
214     if (new_cr4 & CR4_SMAP_MASK) {
215         hflags |= HF_SMAP_MASK;
216     }
217     if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
218         new_cr4 &= ~CR4_UMIP_MASK;
219     }
220     if (new_cr4 & CR4_UMIP_MASK) {
221         hflags |= HF_UMIP_MASK;
222     }
223 
224     if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
225         new_cr4 &= ~CR4_PKE_MASK;
226     }
227     if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
228         new_cr4 &= ~CR4_PKS_MASK;
229     }
230 
231     if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) {
232         new_cr4 &= ~CR4_LAM_SUP_MASK;
233     }
234 
235     env->cr[4] = new_cr4;
236     env->hflags = hflags;
237 
238     cpu_sync_bndcs_hflags(env);
239     cpu_sync_avx_hflag(env);
240 }
241 
242 #if !defined(CONFIG_USER_ONLY)
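/*
 * Walk the guest page tables for the debugger (no fault is raised and no
 * accessed/dirty bits are set).  Returns the physical address for @addr,
 * or -1 if the page is not mapped.  All paging modes (none, 32-bit legacy,
 * PAE, and 4- or 5-level long mode) are handled.
 */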
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page: PSE-36 bits 39:32 come from PDE bits 20:13 */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

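/* Parameters passed to do_inject_x86_mce() via run_on_cpu() */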
typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
                                      bool recursive)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
                                   &mff);
}

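/*
 * Inject one machine-check event into a single vCPU.  Runs on the target
 * CPU via run_on_cpu(); decides between ignoring the event, recording it
 * in the selected MCE bank, raising CPU_INTERRUPT_MCE, or requesting a
 * guest reset (triple fault) when MCE reporting is not usable.
 */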
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        } else if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_printf(params->mon, "%s", msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}

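/*
 * Validate an MCE injection request against the vCPU's MCG capabilities
 * and run the injection on the target CPU.  With MCE_INJECT_BROADCAST,
 * a VAL|UC event in bank 1 is also delivered to every other vCPU.
 */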
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce,
                       RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

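/*
 * Recover the guest EIP for an MMIO access that is currently being handled
 * by TCG, by unwinding the host PC of the memory access back to the start
 * of the instruction.
 */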
static inline target_ulong get_memio_eip(CPUX86State *env)
{
#ifdef CONFIG_TCG
    uint64_t data[INSN_START_WORDS];
    CPUState *cs = env_cpu(env);

    if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
        return env->eip;
    }

    /* Per x86_restore_state_to_opc. */
    if (tcg_cflags_has(cs, CF_PCREL)) {
        return (env->eip & TARGET_PAGE_MASK) | data[0];
    } else {
        return data[0] - env->segs[R_CS].base;
    }
#else
    qemu_build_not_reached();
#endif
}

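/*
 * Record a TPR (task priority register) access.  Under KVM, WHPX or NVMM
 * the access type is latched and CPU_INTERRUPT_TPR is raised so it can be
 * reported on the next exit; under TCG the APIC is notified directly with
 * the guest EIP of the access.
 */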
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        target_ulong eip = get_memio_eip(env);

        apic_handle_tpr_access_report(cpu->apic_state, eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

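/*
 * Fetch a segment descriptor for the debugger.  Returns 1 and fills in
 * base/limit/flags on success, 0 if the selector is outside the table or
 * the descriptor cannot be read.
 */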
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr + 4, (uint8_t *)&e2, sizeof(e2), 0) != 0) {
        return 0;
    }

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        *limit = (*limit << 12) | 0xfff;
    }
    *flags = e2;

    return 1;
}

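/*
 * Handle an INIT signal: reset the vCPU while preserving the architectural
 * state between start_init_save and end_init_save as well as any pending
 * SIPI, then reset the local APIC.
 */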
void do_cpu_init(X86CPU *cpu)
{
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
#endif /* !CONFIG_USER_ONLY */
}

#ifndef CONFIG_USER_ONLY

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (env->hflags & HF_SMM_MASK) {
        return;
    }
    apic_sipi(cpu->apic_state);
}

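/*
 * Load a new EFER value and resynchronise the LMA and SVME hflags that
 * are derived from it.
 */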
void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

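/*
 * Physical memory accessors.  Loads and stores go through the address
 * space selected by the vCPU's current memory transaction attributes
 * (e.g. SMM) rather than touching guest RAM directly.
 */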
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif
733