xref: /openbmc/qemu/target/i386/monitor.c (revision 2266d443)
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "qapi/qmp/qdict.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "sysemu/sev.h"
#include "hmp.h"
#include "qapi/error.h"
#include "sev_i386.h"
#include "qapi/qapi-commands-misc.h"

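/*
 * Print a single page-table entry for "info tlb": the (canonicalised)
 * guest-virtual address, the physical address stored in the entry masked
 * to the page size, and one character per flag
 * (NX/Global/PSE/Dirty/Accessed/PCD/PWT/User/RW).
 */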
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= -1LL << 57;
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= -1LL << 48;
        }
    }
#endif
    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

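/*
 * Walk a legacy 32-bit (non-PAE) two-level page table and print every
 * present mapping, honouring 4M pages when CR4.PSE is set.
 */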
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

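/*
 * Walk a 32-bit PAE page table (PDPT -> PD -> PT) and print every present
 * mapping; 2M pages are reported directly from the PDE.
 */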
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
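/*
 * Dump the 4-level (48-bit) page table rooted at pml4_addr.  When called
 * from the LA57 walker, l0 is the PML5 index, so that the printed virtual
 * addresses include the top-level component.
 */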
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
        uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                        pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                (l2 << 30) + (l3 << 21) + (l4 << 12),
                                pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

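/*
 * Dump a 5-level (57-bit) page table by walking the PML5 and handing each
 * present entry to the 48-bit walker.
 */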
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

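/*
 * HMP "info tlb": print the page mappings of the current CPU, picking the
 * walker that matches the active paging mode (32-bit, PAE, LA48 or LA57).
 */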
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

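/*
 * Range coalescing helper for "info mem": accumulate consecutive pages with
 * identical protection into [*pstart, end) and print the range whenever the
 * protection changes.  A *pstart of -1 means no range is currently open.
 */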
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0) {
            *pstart = end;
        } else {
            *pstart = -1;
        }
        *plast_prot = prot;
    }
}

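/* Print the virtual memory map for legacy 32-bit (non-PAE) paging. */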
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

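/* Print the virtual memory map for 32-bit PAE paging. */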
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

#ifdef TARGET_X86_64
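/*
 * Print the virtual memory map for 4-level (48-bit) paging.  The effective
 * protection of a page is the AND of the U/W bits along the whole walk.
 */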
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}

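/* Print the virtual memory map for 5-level (57-bit) paging. */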
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                            PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                    PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

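/*
 * HMP "info mem": print the guest virtual memory map of the current CPU,
 * picking the walker that matches the active paging mode.
 */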
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

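/*
 * HMP "mce": inject a machine-check error into the selected bank of the
 * selected CPU, optionally broadcasting it to all CPUs.
 */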
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

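/*
 * HMP "info lapic": dump the local APIC state of the CPU selected by the
 * optional "apic-id" argument, or of the current monitor CPU.
 */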
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu();
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    monitor_printf(mon, "This command is obsolete and will be "
                   "removed soon. Please use 'info pic' instead.\n");
}

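/* QMP "query-sev": report whether SEV is enabled and its current state. */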
SevInfo *qmp_query_sev(Error **errp)
{
    SevInfo *info;

    info = sev_get_info();
    if (!info) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return info;
}

void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
    SevInfo *info = sev_get_info();

    if (info && info->enabled) {
        monitor_printf(mon, "handle: %d\n", info->handle);
        monitor_printf(mon, "state: %s\n", SevState_str(info->state));
        monitor_printf(mon, "build: %d\n", info->build_id);
        monitor_printf(mon, "api version: %d.%d\n",
                       info->api_major, info->api_minor);
        monitor_printf(mon, "debug: %s\n",
                       info->policy & SEV_POLICY_NODBG ? "off" : "on");
        monitor_printf(mon, "key-sharing: %s\n",
                       info->policy & SEV_POLICY_NOKS ? "off" : "on");
    } else {
        monitor_printf(mon, "SEV is not enabled\n");
    }

    qapi_free_SevInfo(info);
}

SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
{
    char *data;
    SevLaunchMeasureInfo *info;

    data = sev_get_launch_measurement();
    if (!data) {
        error_setg(errp, "Measurement is not available");
        return NULL;
    }

    info = g_malloc0(sizeof(*info));
    info->data = data;

    return info;
}

SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    SevCapability *data;

    data = sev_get_capabilities();
    if (!data) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return data;
}