/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/qemu-print.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/tcg.h"
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif

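/*
 * Recompute the MPX bits cached in hflags/hflags2 from the current
 * privilege level, CR4.OSXSAVE, XCR0 and the active bound-config
 * register (BNDCFGU in user mode, IA32_BNDCFGS otherwise).
 */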
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

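/*
 * Extract the display family and model from CPUID.01H:EAX
 * (cpuid_version).  The extended model bits are folded into *model;
 * the extended family field is not added to *family here.
 */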
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processors of family 06H, model 0EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        qemu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                     sc->selector, sc->base, sc->limit,
                     sc->flags & 0x00ffff00);
    } else
#endif
    {
        qemu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                     (uint32_t)sc->base, sc->limit,
                     sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    qemu_fprintf(f, " DPL=%d ",
                 (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            qemu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                         ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            qemu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                         (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            qemu_fprintf(f, (sc->flags & DESC_B_MASK
                             || env->hflags & HF_LMA_MASK)
                         ? "DS  " : "DS16");
            qemu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                         (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        qemu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        qemu_fprintf(f, "%s",
                     sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                     [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    qemu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) is a 3-bit field.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}

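/*
 * Pretty-print one Local Vector Table entry: polarity, trigger mode,
 * mask and delivery-status bits, the timer mode (LVTT only) and the
 * delivery mode with its vector.
 */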
static void dump_apic_lvt(const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    qemu_printf("%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                            "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        qemu_printf(" (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        qemu_printf("\n");
    }
}

/* ARRAY_SIZE check is not required because
 * the destination shorthand is a 2-bit field.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}

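/*
 * Decode the timer Divide Configuration Register: encodings 0..6
 * select divide-by-2 up to divide-by-128, while the all-ones
 * encoding (7) means divide-by-1.
 */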
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}

static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

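/*
 * Dump the Interrupt Command Register pair (ICR/ICR2), decoding the
 * destination as a shorthand, a physical APIC/x2APIC ID, or a logical
 * flat/cluster mask depending on the destination mode and DFR.
 */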
static void dump_apic_icr(APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    qemu_printf("ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    qemu_printf("ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        qemu_printf("\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            qemu_printf(" cpu %u (X2APIC ID)\n", dest_field);
        } else {
            qemu_printf(" cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        qemu_printf(" mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            qemu_printf(" cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            qemu_printf(" cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}

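/*
 * Print the vectors set in a 256-bit ISR/IRR bitmap, annotating
 * level-triggered ones according to the matching TMR bits.
 */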
static void dump_apic_interrupt(const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    qemu_printf("%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            qemu_printf("%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    qemu_printf("%s\n", empty ? "(none)" : "");
}

void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    if (!s) {
        qemu_printf("local apic state not available\n");
        return;
    }
    uint32_t *lvt = s->lvt;

    qemu_printf("dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt("LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt("LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt("LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt("LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt("LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt("LVTT", lvt[APIC_LVT_TIMER], true);

    qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u"
                " current_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count, apic_get_current_count(s));

    qemu_printf("SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(s, &cpu->env);

    qemu_printf("ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt("ISR", s->isr, s->tmr);
    dump_apic_interrupt("IRR", s->irr, s->tmr);

    qemu_printf("\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        qemu_printf("(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    qemu_printf(" PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

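/*
 * Window of guest code bytes shown by CPU_DUMP_CODE: up to
 * DUMP_CODE_BYTES_BACKWARD bytes before the current EIP and the
 * remainder of DUMP_CODE_BYTES_TOTAL after it.
 */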
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        qemu_fprintf(f, "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                     "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                     "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                     "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                     "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                     env->regs[R_EAX],
                     env->regs[R_EBX],
                     env->regs[R_ECX],
                     env->regs[R_EDX],
                     env->regs[R_ESI],
                     env->regs[R_EDI],
                     env->regs[R_EBP],
                     env->regs[R_ESP],
                     env->regs[8],
                     env->regs[9],
                     env->regs[10],
                     env->regs[11],
                     env->regs[12],
                     env->regs[13],
                     env->regs[14],
                     env->regs[15],
                     env->eip, eflags,
                     eflags & DF_MASK ? 'D' : '-',
                     eflags & CC_O ? 'O' : '-',
                     eflags & CC_S ? 'S' : '-',
                     eflags & CC_Z ? 'Z' : '-',
                     eflags & CC_A ? 'A' : '-',
                     eflags & CC_P ? 'P' : '-',
                     eflags & CC_C ? 'C' : '-',
                     env->hflags & HF_CPL_MASK,
                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                     (env->a20_mask >> 20) & 1,
                     (env->hflags >> HF_SMM_SHIFT) & 1,
                     cs->halted);
    } else
#endif
    {
        qemu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                     "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                     "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                     (uint32_t)env->regs[R_EAX],
                     (uint32_t)env->regs[R_EBX],
                     (uint32_t)env->regs[R_ECX],
                     (uint32_t)env->regs[R_EDX],
                     (uint32_t)env->regs[R_ESI],
                     (uint32_t)env->regs[R_EDI],
                     (uint32_t)env->regs[R_EBP],
                     (uint32_t)env->regs[R_ESP],
                     (uint32_t)env->eip, eflags,
                     eflags & DF_MASK ? 'D' : '-',
                     eflags & CC_O ? 'O' : '-',
                     eflags & CC_S ? 'S' : '-',
                     eflags & CC_Z ? 'Z' : '-',
                     eflags & CC_A ? 'A' : '-',
                     eflags & CC_P ? 'P' : '-',
                     eflags & CC_C ? 'C' : '-',
                     env->hflags & HF_CPL_MASK,
                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                     (env->a20_mask >> 20) & 1,
                     (env->hflags >> HF_SMM_SHIFT) & 1,
                     cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, seg_name[i], &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        qemu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                     env->gdt.base, env->gdt.limit);
        qemu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                     env->idt.base, env->idt.limit);
        qemu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                     (uint32_t)env->cr[0],
                     env->cr[2],
                     env->cr[3],
                     (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            qemu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        qemu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                     env->dr[6], env->dr[7]);
    } else
#endif
    {
        qemu_fprintf(f, "GDT=     %08x %08x\n",
                     (uint32_t)env->gdt.base, env->gdt.limit);
        qemu_fprintf(f, "IDT=     %08x %08x\n",
                     (uint32_t)env->idt.base, env->idt.limit);
        qemu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                     (uint32_t)env->cr[0],
                     (uint32_t)env->cr[2],
                     (uint32_t)env->cr[3],
                     (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            qemu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        qemu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                     env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                         env->cc_src, env->cc_dst,
                         cc_op_name);
        } else
#endif
        {
            qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                         (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                         cc_op_name);
        }
    }
    qemu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        update_mxcsr_from_sse_status(env);
        qemu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                     env->fpuc,
                     (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                     env->fpstt,
                     fptag,
                     env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            qemu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                         i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                qemu_fprintf(f, "\n");
            else
                qemu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            qemu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                         i,
                         env->xmm_regs[i].ZMM_L(3),
                         env->xmm_regs[i].ZMM_L(2),
                         env->xmm_regs[i].ZMM_L(1),
                         env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                qemu_fprintf(f, "\n");
            else
                qemu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        qemu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            qemu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                         i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        qemu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

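/*
 * Toggle the A20 address-line mask.  A change invalidates every cached
 * translation, so in-flight TBs are interrupted and the TLB is flushed.
 */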
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the CPU is currently executing code, we must unlink it and
           all potentially executing TBs */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

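/*
 * Install a new CR0 value: flush the TLB when paging-related bits
 * change, handle entering or leaving long mode via EFER.LMA, and
 * refresh the PE/ADDSEG/MP/EM/TS bits cached in hflags.
 */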
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                        "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}

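/*
 * Install a new CR4 value: flush the TLB when paging-related bits
 * change, drop bits for features the vCPU does not expose, and
 * recompute the OSFXSR/SMAP/MPX state cached in hflags.
 */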
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if !defined(CONFIG_USER_ONLY)
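/*
 * Debug/monitor address translation: walk the guest page tables by
 * hand (non-paged, PAE/long-mode or legacy 32-bit format) and return
 * the physical address for a virtual one, or -1 if it is not mapped.
 * No accessed/dirty bits are set and no faults are raised.
 */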
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
                                      bool recursive)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
                                   &mff);
}

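/*
 * Run on the target vCPU to inject a machine check described by
 * MCEInjectionParams: either deliver the MCE, ignore an SRAO error
 * while another MCE is already being handled, or request a guest
 * reset (triple fault) when the MCE cannot be delivered, emitting the
 * corresponding MEMORY_FAILURE QAPI event in each case.
 */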
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_printf(params->mon, "%s", msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}

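/*
 * Validate the parameters and inject a machine check on @cpu (this is
 * what e.g. the HMP "mce" command ends up calling).  With
 * MCE_INJECT_BROADCAST the error is additionally broadcast to every
 * other vCPU as an uncorrected error in bank 1.
 */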
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        cpu_restore_state(cs, cs->mem_io_pc, false);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

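/*
 * Debug helper: read the GDT/LDT descriptor selected by @selector with
 * cpu_memory_rw_debug and decode its base, limit and flags.  Returns 1
 * on success, 0 if the selector is outside the table or unreadable.
 */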
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
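/*
 * Handle an INIT: reset the CPU and the local APIC while keeping a
 * pending SIPI and preserving the state between start_init_save and
 * end_init_save, which INIT is expected to leave untouched.
 */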
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format.  */

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

#ifndef CONFIG_USER_ONLY
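/*
 * Physical memory accessors that honour the current memory transaction
 * attributes (e.g. SMM), so that loads and stores go through the
 * address space the CPU is currently using.
 */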
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif