xref: /openbmc/qemu/target/i386/helper.c (revision 0d04c4c9)
1 /*
2  *  i386 helpers (without register variable usage)
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qapi/qapi-events-run-state.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/runstate.h"
25 #include "kvm/kvm_i386.h"
26 #ifndef CONFIG_USER_ONLY
27 #include "sysemu/hw_accel.h"
28 #include "monitor/monitor.h"
29 #endif
30 
31 void cpu_sync_bndcs_hflags(CPUX86State *env)
32 {
33     uint32_t hflags = env->hflags;
34     uint32_t hflags2 = env->hflags2;
35     uint32_t bndcsr;
36 
37     if ((hflags & HF_CPL_MASK) == 3) {
38         bndcsr = env->bndcs_regs.cfgu;
39     } else {
40         bndcsr = env->msr_bndcfgs;
41     }
42 
43     if ((env->cr[4] & CR4_OSXSAVE_MASK)
44         && (env->xcr0 & XSTATE_BNDCSR_MASK)
45         && (bndcsr & BNDCFG_ENABLE)) {
46         hflags |= HF_MPX_EN_MASK;
47     } else {
48         hflags &= ~HF_MPX_EN_MASK;
49     }
50 
51     if (bndcsr & BNDCFG_BNDPRESERVE) {
52         hflags2 |= HF2_MPX_PR_MASK;
53     } else {
54         hflags2 &= ~HF2_MPX_PR_MASK;
55     }
56 
57     env->hflags = hflags;
58     env->hflags2 = hflags2;
59 }
60 
61 static void cpu_x86_version(CPUX86State *env, int *family, int *model)
62 {
63     int cpuver = env->cpuid_version;
64 
65     if (family == NULL || model == NULL) {
66         return;
67     }
68 
69     *family = (cpuver >> 8) & 0x0f;
70     *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
71 }
72 
73 /* Broadcast MCA signal for processor version 06H_EH and above */
74 int cpu_x86_support_mca_broadcast(CPUX86State *env)
75 {
76     int family = 0;
77     int model = 0;
78 
79     cpu_x86_version(env, &family, &model);
80     if ((family == 6 && model >= 14) || family > 6) {
81         return 1;
82     }
83 
84     return 0;
85 }
86 
87 /***********************************************************/
88 /* x86 mmu */
89 /* XXX: add PGE support */
90 
91 void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
92 {
93     CPUX86State *env = &cpu->env;
94 
95     a20_state = (a20_state != 0);
96     if (a20_state != ((env->a20_mask >> 20) & 1)) {
97         CPUState *cs = CPU(cpu);
98 
99         qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
100         /* if the cpu is currently executing code, we must unlink it and
101            all the potentially executing TB */
102         cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
103 
104         /* when a20 is changed, all the MMU mappings are invalid, so
105            we must flush everything */
106         tlb_flush(cs);
107         env->a20_mask = ~(1 << 20) | (a20_state << 20);
108     }
109 }
110 
111 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
112 {
113     X86CPU *cpu = env_archcpu(env);
114     int pe_state;
115 
116     qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
117     if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
118         (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
119         tlb_flush(CPU(cpu));
120     }
121 
122 #ifdef TARGET_X86_64
123     if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
124         (env->efer & MSR_EFER_LME)) {
125         /* enter in long mode */
126         /* XXX: generate an exception */
127         if (!(env->cr[4] & CR4_PAE_MASK))
128             return;
129         env->efer |= MSR_EFER_LMA;
130         env->hflags |= HF_LMA_MASK;
131     } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
132                (env->efer & MSR_EFER_LMA)) {
133         /* exit long mode */
134         env->efer &= ~MSR_EFER_LMA;
135         env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
136         env->eip &= 0xffffffff;
137     }
138 #endif
139     env->cr[0] = new_cr0 | CR0_ET_MASK;
140 
141     /* update PE flag in hidden flags */
142     pe_state = (env->cr[0] & CR0_PE_MASK);
143     env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
144     /* ensure that ADDSEG is always set in real mode */
145     env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
146     /* update FPU flags */
147     env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
148         ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
149 }
150 
151 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
152    the PDPT */
153 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
154 {
155     env->cr[3] = new_cr3;
156     if (env->cr[0] & CR0_PG_MASK) {
157         qemu_log_mask(CPU_LOG_MMU,
158                         "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
159         tlb_flush(env_cpu(env));
160     }
161 }
162 
163 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
164 {
165     uint32_t hflags;
166 
167 #if defined(DEBUG_MMU)
168     printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
169 #endif
170     if ((new_cr4 ^ env->cr[4]) &
171         (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
172          CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
173         tlb_flush(env_cpu(env));
174     }
175 
176     /* Clear bits we're going to recompute.  */
177     hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK | HF_UMIP_MASK);
178 
179     /* SSE handling */
180     if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
181         new_cr4 &= ~CR4_OSFXSR_MASK;
182     }
183     if (new_cr4 & CR4_OSFXSR_MASK) {
184         hflags |= HF_OSFXSR_MASK;
185     }
186 
187     if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
188         new_cr4 &= ~CR4_SMAP_MASK;
189     }
190     if (new_cr4 & CR4_SMAP_MASK) {
191         hflags |= HF_SMAP_MASK;
192     }
193     if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
194         new_cr4 &= ~CR4_UMIP_MASK;
195     }
196     if (new_cr4 & CR4_UMIP_MASK) {
197         hflags |= HF_UMIP_MASK;
198     }
199 
200     if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
201         new_cr4 &= ~CR4_PKE_MASK;
202     }
203     if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
204         new_cr4 &= ~CR4_PKS_MASK;
205     }
206 
207     env->cr[4] = new_cr4;
208     env->hflags = hflags;
209 
210     cpu_sync_bndcs_hflags(env);
211 }
212 
213 #if !defined(CONFIG_USER_ONLY)
/*
 * Translate a guest virtual address to a physical address for debug
 * accesses (gdbstub, monitor).
 *
 * Walks the page tables by hand according to the current paging mode
 * (paging disabled, legacy 32-bit, PAE, or 4-/5-level long mode),
 * using debug physical loads only: no accessed/dirty bits are set and
 * no fault is injected.
 *
 * Returns the physical address, or -1 if the address is not mapped.
 * *attrs receives the CPU's current memory transaction attributes.
 */
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: physical == linear address (modulo A20). */
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                /* 4-level paging: treat CR3 itself as the "PML5 entry"
                   so the walk below is shared. */
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT indexed by address bits 31:30. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page; PDE bits 20:13 supply PA bits 39:32 (PSE-36) */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    /* Combine the page frame with the in-page offset; for large pages
       the offset keeps the sub-frame address bits above TARGET_PAGE. */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
337 
/* Parameters describing one MCE injection, passed to do_inject_x86_mce(). */
typedef struct MCEInjectionParams {
    Monitor *mon;          /* monitor to print diagnostics to */
    int bank;              /* target MCA bank number */
    uint64_t status;       /* MCi_STATUS value to inject */
    uint64_t mcg_status;   /* MCG_STATUS value to install */
    uint64_t addr;         /* MCi_ADDR value */
    uint64_t misc;         /* MCi_MISC value */
    int flags;             /* MCE_INJECT_* flags */
} MCEInjectionParams;
347 
348 static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
349                                       bool recursive)
350 {
351     MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};
352 
353     qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
354                                    &mff);
355 }
356 
/*
 * Inject a machine-check event into vCPU @cs; executed on that CPU's
 * thread via run_on_cpu(), with data.host_ptr pointing to a
 * MCEInjectionParams.
 *
 * Mirrors the MCA rules implemented below: a non-action-required
 * event is dropped while a previous MCE is in flight (unless
 * MCE_INJECT_UNCOND_AO), an uncorrected error can escalate to a guest
 * reset, and a corrected error only updates the bank registers.
 */
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    /* Each bank occupies 4 consecutive slots: CTL, STATUS, ADDR, MISC. */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    /* MCG_STATUS.MCIP set means a previous MCE is still being handled. */
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        /* CR4.MCE clear or nested MCE: escalate to a triple fault. */
        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        } else if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_printf(params->mon, "%s", msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        /* Overwrite the bank, flagging overflow if it held a valid event. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: update the bank registers, no interrupt. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* Bank already holds a valid UC event: only record the overflow. */
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}
445 
/*
 * Monitor entry point for MCE injection.
 *
 * Validates the request against the guest's MCA capabilities, then
 * injects the event on @cpu via run_on_cpu().  With
 * MCE_INJECT_BROADCAST, a fixed UC event is additionally sent to
 * every other CPU.
 */
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    /* Low byte of MCG_CAP holds the number of implemented banks. */
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    /* NOTE(review): &params is on this stack frame — assumes
       run_on_cpu() completes synchronously; confirm against its API. */
    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        /* Broadcast a canonical UC event to all other CPUs. */
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}
498 
499 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
500 {
501     X86CPU *cpu = env_archcpu(env);
502     CPUState *cs = env_cpu(env);
503 
504     if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
505         env->tpr_access_type = access;
506 
507         cpu_interrupt(cs, CPU_INTERRUPT_TPR);
508     } else if (tcg_enabled()) {
509         cpu_restore_state(cs, cs->mem_io_pc, false);
510 
511         apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
512     }
513 }
514 #endif /* !CONFIG_USER_ONLY */
515 
516 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
517                             target_ulong *base, unsigned int *limit,
518                             unsigned int *flags)
519 {
520     CPUState *cs = env_cpu(env);
521     SegmentCache *dt;
522     target_ulong ptr;
523     uint32_t e1, e2;
524     int index;
525 
526     if (selector & 0x4)
527         dt = &env->ldt;
528     else
529         dt = &env->gdt;
530     index = selector & ~7;
531     ptr = dt->base + index;
532     if ((index + 7) > dt->limit
533         || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
534         || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
535         return 0;
536 
537     *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
538     *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
539     if (e2 & DESC_G_MASK)
540         *limit = (*limit << 12) | 0xfff;
541     *flags = e2;
542 
543     return 1;
544 }
545 
546 #if !defined(CONFIG_USER_ONLY)
/*
 * Handle an INIT signal: fully reset the CPU, then restore the slice
 * of state that INIT leaves untouched.
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    /* Snapshot the entire state before the reset wipes it. */
    *save = *env;

    cpu_reset(cs);
    /* Keep only a pending SIPI (if any) across the reset. */
    cs->interrupt_request = sipi;
    /* Restore the INIT-preserved window [start_init_save, end_init_save). */
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}
568 
/* Handle a Startup IPI by forwarding it to the local APIC. */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
573 #else
/* User-mode emulation: INIT signals do not exist; nothing to do. */
void do_cpu_init(X86CPU *cpu)
{
}
/* User-mode emulation: SIPIs do not exist; nothing to do. */
void do_cpu_sipi(X86CPU *cpu)
{
}
580 #endif
581 
582 #ifndef CONFIG_USER_ONLY
583 
584 void cpu_load_efer(CPUX86State *env, uint64_t val)
585 {
586     env->efer = val;
587     env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
588     if (env->efer & MSR_EFER_LMA) {
589         env->hflags |= HF_LMA_MASK;
590     }
591     if (env->efer & MSR_EFER_SVME) {
592         env->hflags |= HF_SVME_MASK;
593     }
594 }
595 
596 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
597 {
598     X86CPU *cpu = X86_CPU(cs);
599     CPUX86State *env = &cpu->env;
600     MemTxAttrs attrs = cpu_get_mem_attrs(env);
601     AddressSpace *as = cpu_addressspace(cs, attrs);
602 
603     return address_space_ldub(as, addr, attrs, NULL);
604 }
605 
606 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
607 {
608     X86CPU *cpu = X86_CPU(cs);
609     CPUX86State *env = &cpu->env;
610     MemTxAttrs attrs = cpu_get_mem_attrs(env);
611     AddressSpace *as = cpu_addressspace(cs, attrs);
612 
613     return address_space_lduw(as, addr, attrs, NULL);
614 }
615 
616 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
617 {
618     X86CPU *cpu = X86_CPU(cs);
619     CPUX86State *env = &cpu->env;
620     MemTxAttrs attrs = cpu_get_mem_attrs(env);
621     AddressSpace *as = cpu_addressspace(cs, attrs);
622 
623     return address_space_ldl(as, addr, attrs, NULL);
624 }
625 
626 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
627 {
628     X86CPU *cpu = X86_CPU(cs);
629     CPUX86State *env = &cpu->env;
630     MemTxAttrs attrs = cpu_get_mem_attrs(env);
631     AddressSpace *as = cpu_addressspace(cs, attrs);
632 
633     return address_space_ldq(as, addr, attrs, NULL);
634 }
635 
636 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
637 {
638     X86CPU *cpu = X86_CPU(cs);
639     CPUX86State *env = &cpu->env;
640     MemTxAttrs attrs = cpu_get_mem_attrs(env);
641     AddressSpace *as = cpu_addressspace(cs, attrs);
642 
643     address_space_stb(as, addr, val, attrs, NULL);
644 }
645 
646 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
647 {
648     X86CPU *cpu = X86_CPU(cs);
649     CPUX86State *env = &cpu->env;
650     MemTxAttrs attrs = cpu_get_mem_attrs(env);
651     AddressSpace *as = cpu_addressspace(cs, attrs);
652 
653     address_space_stl_notdirty(as, addr, val, attrs, NULL);
654 }
655 
656 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
657 {
658     X86CPU *cpu = X86_CPU(cs);
659     CPUX86State *env = &cpu->env;
660     MemTxAttrs attrs = cpu_get_mem_attrs(env);
661     AddressSpace *as = cpu_addressspace(cs, attrs);
662 
663     address_space_stw(as, addr, val, attrs, NULL);
664 }
665 
666 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
667 {
668     X86CPU *cpu = X86_CPU(cs);
669     CPUX86State *env = &cpu->env;
670     MemTxAttrs attrs = cpu_get_mem_attrs(env);
671     AddressSpace *as = cpu_addressspace(cs, attrs);
672 
673     address_space_stl(as, addr, val, attrs, NULL);
674 }
675 
676 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
677 {
678     X86CPU *cpu = X86_CPU(cs);
679     CPUX86State *env = &cpu->env;
680     MemTxAttrs attrs = cpu_get_mem_attrs(env);
681     AddressSpace *as = cpu_addressspace(cs, attrs);
682 
683     address_space_stq(as, addr, val, attrs, NULL);
684 }
685 #endif
686