/* xref: /openbmc/qemu/target/i386/monitor.c (revision 62aa1d88) */
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "qapi/qmp/qdict.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "sysemu/sev.h"
#include "hmp.h"
#include "qapi/error.h"
#include "sev_i386.h"
#include "qapi/qapi-commands-misc.h"

/* Perform linear address sign extension */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= (hwaddr)-(1LL << 57);
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= (hwaddr)-(1LL << 48);
        }
    }
#endif
    return addr;
}

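/*
 * Print one page table entry: the (canonicalized) virtual address, the
 * physical address selected by 'mask', and one letter per attribute bit.
 */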
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    addr = addr_canonical(env, addr);

    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

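/* Dump present mappings from a legacy 32-bit two-level page table. */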
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

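/* Dump present mappings from a PAE (non-long-mode) three-level page table. */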
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
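/*
 * Dump present mappings from a 4-level (48-bit) page table rooted at
 * 'pml4_addr'; 'l0' is the PML5 index when called from the LA57 walker.
 */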
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
        uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                        pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                (l2 << 30) + (l3 << 21) + (l4 << 12),
                                pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

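/* Dump a 5-level (57-bit) page table by walking every present PML5 entry. */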
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

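/* HMP 'info tlb': dump guest page tables for the active paging mode. */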
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

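/*
 * Coalesce virtually contiguous ranges with identical protection bits:
 * when the protection changes, print the range that just ended and start
 * a new one at 'end' (or none, *pstart = -1, if the new range is unmapped).
 */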
static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           addr_canonical(env, *pstart),
                           addr_canonical(env, end),
                           addr_canonical(env, end - *pstart),
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0) {
            *pstart = end;
        } else {
            *pstart = -1;
        }
        *plast_prot = prot;
    }
}

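/* 'info mem' walker for legacy 32-bit two-level paging. */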
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

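/* 'info mem' walker for PAE paging outside long mode. */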
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, env, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

#ifdef TARGET_X86_64
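/* 'info mem' walker for 4-level (48-bit) long mode paging. */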
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}

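/* 'info mem' walker for 5-level (57-bit) long mode paging. */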
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                            PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                    PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

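/* HMP 'info mem': print virtual address ranges and their protection bits. */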
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

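/* HMP 'mce': inject a machine check event into the selected vCPU. */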
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

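/* Resolve the 'pc' pseudo-register as CS base plus EIP. */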
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

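/* Register names that the monitor expression evaluator can resolve. */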
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

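/*
 * HMP 'info lapic': dump the local APIC state of the current CPU, or of
 * the CPU with the given APIC ID when 'apic-id' is supplied.
 */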
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu();
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    monitor_printf(mon, "This command is obsolete and will be "
                   "removed soon. Please use 'info pic' instead.\n");
}

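/* QMP 'query-sev': report SEV state, or an error when SEV is unavailable. */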
SevInfo *qmp_query_sev(Error **errp)
{
    SevInfo *info;

    info = sev_get_info();
    if (!info) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return info;
}

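/* HMP 'info sev': print a human-readable summary of the SEV guest state. */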
void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
    SevInfo *info = sev_get_info();

    if (info && info->enabled) {
        monitor_printf(mon, "handle: %d\n", info->handle);
        monitor_printf(mon, "state: %s\n", SevState_str(info->state));
        monitor_printf(mon, "build: %d\n", info->build_id);
        monitor_printf(mon, "api version: %d.%d\n",
                       info->api_major, info->api_minor);
        monitor_printf(mon, "debug: %s\n",
                       info->policy & SEV_POLICY_NODBG ? "off" : "on");
        monitor_printf(mon, "key-sharing: %s\n",
                       info->policy & SEV_POLICY_NOKS ? "off" : "on");
    } else {
        monitor_printf(mon, "SEV is not enabled\n");
    }

    qapi_free_SevInfo(info);
}

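/* QMP 'query-sev-launch-measure': return the SEV launch measurement. */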
SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
{
    char *data;
    SevLaunchMeasureInfo *info;

    data = sev_get_launch_measurement();
    if (!data) {
        error_setg(errp, "Measurement is not available");
        return NULL;
    }

    info = g_malloc0(sizeof(*info));
    info->data = data;

    return info;
}

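/* QMP 'query-sev-capabilities': return the host's SEV capability data. */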
SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    SevCapability *data;

    data = sev_get_capabilities();
    if (!data) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return data;
}