xref: /openbmc/qemu/target/i386/helper.c (revision 28d5bfc0)
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/runstate.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#include "kvm/kvm_i386.h"
#endif
#include "qemu/log.h"
#ifdef CONFIG_TCG
#include "tcg/insn-start-words.h"
#endif

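/*
 * Keep HF_AVX_EN_MASK in sync with the control state: AVX instructions
 * are only usable when CR4.OSXSAVE is set and both the SSE and YMM state
 * components are enabled in XCR0.
 */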
void cpu_sync_avx_hflag(CPUX86State *env)
{
    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & (XSTATE_SSE_MASK | XSTATE_YMM_MASK))
            == (XSTATE_SSE_MASK | XSTATE_YMM_MASK)) {
        env->hflags |= HF_AVX_EN_MASK;
    } else {
        env->hflags &= ~HF_AVX_EN_MASK;
    }
}

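/*
 * Recompute the MPX-related hidden flags.  MPX is enabled (HF_MPX_EN)
 * when CR4.OSXSAVE is set, the BNDCSR state component is enabled in XCR0,
 * and the active configuration register (BNDCFGU at CPL 3, MSR_BNDCFGS
 * otherwise) has its enable bit set.  HF2_MPX_PR mirrors BNDPRESERVE.
 */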
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

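/*
 * Extract the family and model from CPUID leaf 1 (EAX).  The family is
 * taken from bits 11:8 only; the model combines the extended model field
 * (bits 19:16) with the base model field (bits 7:4).
 */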
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/*
 * MCA broadcast is supported on processor family 06H, model 0EH (06_0EH)
 * and newer, i.e. family 6 with model >= 14, or any later family.
 */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

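/*
 * Update the A20 address line.  When the state actually changes, all
 * cached translations become stale: executing TBs are unlinked, the TLB
 * is flushed, and env->a20_mask is recomputed so that bit 20 of physical
 * addresses is masked off while the line is disabled.
 */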
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /*
         * If the CPU is currently executing code, we must unlink it and
         * all the potentially executing TBs.
         */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /*
         * When A20 is changed, all the MMU mappings are invalid, so
         * we must flush everything.
         */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

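/*
 * Install a new CR0 value.  Flushes the TLB when the paging-related bits
 * (PG/WP/PE) change, handles entering and leaving long mode when PG is
 * toggled with EFER.LME/LMA set, and recomputes the PE/ADDSEG and
 * MP/EM/TS hidden flags from the new value.
 */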
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /*
     * Update the FPU flags: CR0.MP/EM/TS occupy bits 1..3, so shifting
     * CR0 left by HF_MP_SHIFT - 1 lines them up with HF_MP/HF_EM/HF_TS.
     */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/*
 * XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
 * the PDPT.
 */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}

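/*
 * Install a new CR4 value.  Bits the CPU model does not implement are
 * forced off, the TLB is flushed when paging-related bits change, and the
 * OSFXSR/SMAP/UMIP hidden flags plus the MPX and AVX hflags are recomputed
 * from the new value.
 */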
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK | HF_UMIP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
        new_cr4 &= ~CR4_UMIP_MASK;
    }
    if (new_cr4 & CR4_UMIP_MASK) {
        hflags |= HF_UMIP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
        new_cr4 &= ~CR4_PKS_MASK;
    }

    if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) {
        new_cr4 &= ~CR4_LAM_SUP_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
    cpu_sync_avx_hflag(env);
}

#if !defined(CONFIG_USER_ONLY)
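/*
 * Debug-only translation of a virtual address to a physical address.  It
 * walks the page tables by hand (5-level and 4-level long mode, 32-bit
 * PAE, and legacy 2-level paging, including large pages) without setting
 * accessed/dirty bits, and returns -1 if the address is not mapped.
 */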
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page: PDE bits 20:13 supply physical address bits 39:32 */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

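/* Parameters for a machine-check injection, passed via run_on_cpu(). */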
typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

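/* Emit a MEMORY_FAILURE QAPI event targeting the guest. */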
static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
                                      bool recursive)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
                                   &mff);
}

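/*
 * Inject a machine-check event into one CPU.  Runs on the target CPU via
 * run_on_cpu(); decides whether to deliver, ignore, or escalate the error
 * to a guest reset, and emits a MEMORY_FAILURE QAPI event accordingly.
 */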
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        } else if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_printf(params->mon, "%s", msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}

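/*
 * Entry point for monitor-driven MCE injection.  Validates the request,
 * injects the event on the target CPU, and, when MCE_INJECT_BROADCAST is
 * set, injects a matching machine check into every other CPU as well.
 */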
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

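/*
 * Recover the current EIP from TCG unwind data when an MMIO access is in
 * progress.  With CF_PCREL, the unwind word holds only the low bits of
 * the address within the page; otherwise it holds the linear address,
 * from which the CS base must be subtracted.
 */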
static inline target_ulong get_memio_eip(CPUX86State *env)
{
#ifdef CONFIG_TCG
    uint64_t data[TARGET_INSN_START_WORDS];
    CPUState *cs = env_cpu(env);

    if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
        return env->eip;
    }

    /* Per x86_restore_state_to_opc. */
    if (tcg_cflags_has(cs, CF_PCREL)) {
        return (env->eip & TARGET_PAGE_MASK) | data[0];
    } else {
        return data[0] - env->segs[R_CS].base;
    }
#else
    qemu_build_not_reached();
#endif
}

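/*
 * Report a TPR access.  Hardware accelerators (KVM, WHPX, NVMM) record
 * the access type and raise CPU_INTERRUPT_TPR; under TCG the access is
 * reported to the APIC directly, using the unwound EIP of the faulting
 * instruction.
 */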
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        target_ulong eip = get_memio_eip(env);

        apic_handle_tpr_access_report(cpu->apic_state, eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

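/*
 * Debug helper: read the segment descriptor for the given selector from
 * the GDT or LDT and decode its base, limit, and flags.  Returns 1 on
 * success, 0 if the selector is out of range or the descriptor cannot be
 * read.
 */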
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr + 4, (uint8_t *)&e2, sizeof(e2), 0) != 0) {
        return 0;
    }

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        *limit = (*limit << 12) | 0xfff;
    }
    *flags = e2;

    return 1;
}

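/*
 * Handle an INIT signal.  The CPU is reset, but the state between
 * start_init_save and end_init_save is preserved across the reset, and a
 * pending SIPI is kept so the CPU can be started afterwards.
 */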
void do_cpu_init(X86CPU *cpu)
{
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
#endif /* !CONFIG_USER_ONLY */
}

#ifndef CONFIG_USER_ONLY

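/* Deliver a startup IPI by forwarding it to the local APIC. */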
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}

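/*
 * Load a new EFER value and keep the LMA and SVME hidden flags in sync
 * with it.
 */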
void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

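/*
 * Physical memory accessors.  Each helper resolves the address space to
 * use from the CPU's current memory transaction attributes (e.g. SMM)
 * before performing the access.
 */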
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif