xref: /openbmc/qemu/target/i386/monitor.c (revision 93777de3650e7db4b0434d63dd461505b85519f7)
1 /*
2  * QEMU monitor
3  *
4  * Copyright (c) 2003-2004 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "cpu.h"
27 #include "monitor/monitor.h"
28 #include "monitor/hmp-target.h"
29 #include "monitor/hmp.h"
30 #include "qapi/qmp/qdict.h"
31 #include "qapi/qmp/qerror.h"
32 #include "sysemu/kvm.h"
33 #include "sysemu/sev.h"
34 #include "qapi/error.h"
35 #include "sev.h"
36 #include "qapi/qapi-commands-misc-target.h"
37 #include "qapi/qapi-commands-misc.h"
38 #include "hw/i386/pc.h"
39 #include "hw/i386/sgx.h"
40 
41 /* Perform linear address sign extension */
42 static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
43 {
44 #ifdef TARGET_X86_64
45     if (env->cr[4] & CR4_LA57_MASK) {
46         if (addr & (1ULL << 56)) {
47             addr |= (hwaddr)-(1LL << 57);
48         }
49     } else {
50         if (addr & (1ULL << 47)) {
51             addr |= (hwaddr)-(1LL << 48);
52         }
53     }
54 #endif
55     return addr;
56 }
57 
58 static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
59                       hwaddr pte, hwaddr mask)
60 {
61     addr = addr_canonical(env, addr);
62 
63     monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
64                    " %c%c%c%c%c%c%c%c%c\n",
65                    addr,
66                    pte & mask,
67                    pte & PG_NX_MASK ? 'X' : '-',
68                    pte & PG_GLOBAL_MASK ? 'G' : '-',
69                    pte & PG_PSE_MASK ? 'P' : '-',
70                    pte & PG_DIRTY_MASK ? 'D' : '-',
71                    pte & PG_ACCESSED_MASK ? 'A' : '-',
72                    pte & PG_PCD_MASK ? 'C' : '-',
73                    pte & PG_PWT_MASK ? 'T' : '-',
74                    pte & PG_USER_MASK ? 'U' : '-',
75                    pte & PG_RW_MASK ? 'W' : '-');
76 }
77 
/*
 * Dump every present mapping of a legacy two-level 32-bit page table
 * (CR4.PAE = 0): 1024 PDEs x 1024 PTEs, honouring 4 MiB PSE pages.
 */
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    /* CR3 bits 31:12 hold the page-directory base. */
    pgd = env->cr[3] & ~0xfff;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            /* PSE large pages exist only when CR4.PSE is enabled. */
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                /*
                 * NOTE(review): the mask clears only bits 20:0; 4 MiB
                 * frames are aligned to 1 << 22 — confirm whether bit 21
                 * should also be cleared (it carries PSE-36 high bits).
                 */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        /* Clear PG_PSE_MASK so print_pte does not flag a
                         * 4K page as PSE. */
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}
105 
/*
 * Dump every present mapping of a PAE (3-level, 32-bit) page table:
 * 4 PDPEs -> 512 PDEs -> 512 PTEs, honouring 2 MiB large pages.
 */
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    /* In PAE mode CR3 bits 31:5 hold the PDPT base. */
    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            /* 0x3fffffffff000ULL keeps physical-address bits 51:12. */
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        /*
                         * NOTE(review): the mask clears only bits 19:0;
                         * 2 MiB frames are aligned to 1 << 21 — confirm
                         * whether bit 20 should also be cleared.
                         */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                /* Clear PG_PSE_MASK so print_pte does not
                                 * flag a 4K page as PSE. */
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}
144 
145 #ifdef TARGET_X86_64
/*
 * Dump every present mapping of one 4-level (48-bit) page-table tree
 * rooted at @pml4_addr.  @l0 is the PML5 index when called from the
 * LA57 walker (0 when CR3 points directly at the PML4); it supplies
 * bits 56:48 of the printed virtual address.
 */
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
        uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        /* 0x3fffffffff000ULL keeps physical-address bits 51:12. */
        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                        pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr
                            + l4 * 8,
                            &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        /* Clear PG_PSE_MASK so print_pte does not flag a
                         * 4K page as PSE. */
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                (l2 << 30) + (l3 << 21) + (l4 << 12),
                                pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}
206 
207 static void tlb_info_la57(Monitor *mon, CPUArchState *env)
208 {
209     uint64_t l0;
210     uint64_t pml5e;
211     uint64_t pml5_addr;
212 
213     pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
214     for (l0 = 0; l0 < 512; l0++) {
215         cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
216         pml5e = le64_to_cpu(pml5e);
217         if (pml5e & PG_PRESENT_MASK) {
218             tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
219         }
220     }
221 }
222 #endif /* TARGET_X86_64 */
223 
/*
 * HMP "info tlb": dump all present page-table mappings of the current
 * monitor CPU, dispatching on the active paging mode (legacy 32-bit,
 * PAE, 4-level or 5-level long mode).
 */
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    /* Nothing to walk when paging is disabled. */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                /* 4-level mode: the PML4 base comes straight from CR3. */
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}
255 
256 static void mem_print(Monitor *mon, CPUArchState *env,
257                       hwaddr *pstart, int *plast_prot,
258                       hwaddr end, int prot)
259 {
260     int prot1;
261     prot1 = *plast_prot;
262     if (prot != prot1) {
263         if (*pstart != -1) {
264             monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
265                            TARGET_FMT_plx " %c%c%c\n",
266                            addr_canonical(env, *pstart),
267                            addr_canonical(env, end),
268                            addr_canonical(env, end - *pstart),
269                            prot1 & PG_USER_MASK ? 'u' : '-',
270                            'r',
271                            prot1 & PG_RW_MASK ? 'w' : '-');
272         }
273         if (prot != 0)
274             *pstart = end;
275         else
276             *pstart = -1;
277         *plast_prot = prot;
278     }
279 }
280 
/*
 * Print the virtual memory map (coalesced protection ranges) for legacy
 * two-level 32-bit paging.  Effective protection of a 4K page is the
 * AND of the PDE and PTE U/W/P bits.
 */
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;          /* no range open yet */
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* A 4M PSE page: protection comes from the PDE alone. */
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            /* Non-present PDE: close any open range. */
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
321 
/*
 * Print the virtual memory map (coalesced protection ranges) for PAE
 * (3-level, 32-bit) paging.  Effective protection of a page is the AND
 * of the U/W/P bits of every level on the walk path.
 */
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    /* In PAE mode CR3 bits 31:5 hold the PDPT base. */
    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;          /* no range open yet */
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            /* 0x3fffffffff000ULL keeps physical-address bits 51:12. */
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* A 2M page: protection comes from the PDE. */
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, env, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    /* Non-present PDE: close any open range. */
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
376 
377 
378 #ifdef TARGET_X86_64
/*
 * Print the virtual memory map (coalesced protection ranges) for
 * 4-level (48-bit) long-mode paging.  Effective protection of a page is
 * the AND of the U/W/P bits of every level on the walk path; 1G and 2M
 * large pages terminate the walk early.
 */
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    /* 0x3fffffffff000ULL keeps physical-address bits 51:12. */
    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;          /* no range open yet */
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1G page: protection from PDPE ANDed with PML4E. */
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2M page. */
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                /* Non-present PDE: close any open range. */
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}
458 
459 static void mem_info_la57(Monitor *mon, CPUArchState *env)
460 {
461     int prot, last_prot;
462     uint64_t l0, l1, l2, l3, l4;
463     uint64_t pml5e, pml4e, pdpe, pde, pte;
464     uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
465 
466     pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
467     last_prot = 0;
468     start = -1;
469     for (l0 = 0; l0 < 512; l0++) {
470         cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
471         pml5e = le64_to_cpu(pml5e);
472         end = l0 << 48;
473         if (!(pml5e & PG_PRESENT_MASK)) {
474             prot = 0;
475             mem_print(mon, env, &start, &last_prot, end, prot);
476             continue;
477         }
478 
479         pml4_addr = pml5e & 0x3fffffffff000ULL;
480         for (l1 = 0; l1 < 512; l1++) {
481             cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
482             pml4e = le64_to_cpu(pml4e);
483             end = (l0 << 48) + (l1 << 39);
484             if (!(pml4e & PG_PRESENT_MASK)) {
485                 prot = 0;
486                 mem_print(mon, env, &start, &last_prot, end, prot);
487                 continue;
488             }
489 
490             pdp_addr = pml4e & 0x3fffffffff000ULL;
491             for (l2 = 0; l2 < 512; l2++) {
492                 cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
493                 pdpe = le64_to_cpu(pdpe);
494                 end = (l0 << 48) + (l1 << 39) + (l2 << 30);
495                 if (pdpe & PG_PRESENT_MASK) {
496                     prot = 0;
497                     mem_print(mon, env, &start, &last_prot, end, prot);
498                     continue;
499                 }
500 
501                 if (pdpe & PG_PSE_MASK) {
502                     prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
503                             PG_PRESENT_MASK);
504                     prot &= pml5e & pml4e;
505                     mem_print(mon, env, &start, &last_prot, end, prot);
506                     continue;
507                 }
508 
509                 pd_addr = pdpe & 0x3fffffffff000ULL;
510                 for (l3 = 0; l3 < 512; l3++) {
511                     cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
512                     pde = le64_to_cpu(pde);
513                     end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
514                     if (pde & PG_PRESENT_MASK) {
515                         prot = 0;
516                         mem_print(mon, env, &start, &last_prot, end, prot);
517                         continue;
518                     }
519 
520                     if (pde & PG_PSE_MASK) {
521                         prot = pde & (PG_USER_MASK | PG_RW_MASK |
522                                 PG_PRESENT_MASK);
523                         prot &= pml5e & pml4e & pdpe;
524                         mem_print(mon, env, &start, &last_prot, end, prot);
525                         continue;
526                     }
527 
528                     pt_addr = pde & 0x3fffffffff000ULL;
529                     for (l4 = 0; l4 < 512; l4++) {
530                         cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
531                         pte = le64_to_cpu(pte);
532                         end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
533                             (l3 << 21) + (l4 << 12);
534                         if (pte & PG_PRESENT_MASK) {
535                             prot = pte & (PG_USER_MASK | PG_RW_MASK |
536                                     PG_PRESENT_MASK);
537                             prot &= pml5e & pml4e & pdpe & pde;
538                         } else {
539                             prot = 0;
540                         }
541                         mem_print(mon, env, &start, &last_prot, end, prot);
542                     }
543                 }
544             }
545         }
546     }
547     /* Flush last range */
548     mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
549 }
550 #endif /* TARGET_X86_64 */
551 
/*
 * HMP "info mem": print the guest's virtual memory map as coalesced
 * protection ranges, dispatching on the active paging mode (legacy
 * 32-bit, PAE, 4-level or 5-level long mode).
 */
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    /* Nothing to walk when paging is disabled. */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}
583 
584 void hmp_mce(Monitor *mon, const QDict *qdict)
585 {
586     X86CPU *cpu;
587     CPUState *cs;
588     int cpu_index = qdict_get_int(qdict, "cpu_index");
589     int bank = qdict_get_int(qdict, "bank");
590     uint64_t status = qdict_get_int(qdict, "status");
591     uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
592     uint64_t addr = qdict_get_int(qdict, "addr");
593     uint64_t misc = qdict_get_int(qdict, "misc");
594     int flags = MCE_INJECT_UNCOND_AO;
595 
596     if (qdict_get_try_bool(qdict, "broadcast", false)) {
597         flags |= MCE_INJECT_BROADCAST;
598     }
599     cs = qemu_get_cpu(cpu_index);
600     if (cs != NULL) {
601         cpu = X86_CPU(cs);
602         cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
603                            flags);
604     }
605 }
606 
607 static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
608                                   int val)
609 {
610     CPUArchState *env = mon_get_cpu_env(mon);
611     return env->eip + env->segs[R_CS].base;
612 }
613 
/*
 * Register table for the monitor expression evaluator: maps register
 * names to offsets inside CPUX86State (or to getter callbacks).
 */
const MonitorDef monitor_defs[] = {
/* Expands to three entries per segment register: selector, base, limit. */
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    /* "pc" has no storage; it is computed by monitor_get_pc(). */
    { "pc", 0, monitor_get_pc, },
    { NULL },
};
649 
/* Expose the x86 register table to the generic monitor code. */
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}
654 
655 void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
656 {
657     CPUState *cs;
658 
659     if (qdict_haskey(qdict, "apic-id")) {
660         int id = qdict_get_try_int(qdict, "apic-id", 0);
661         cs = cpu_by_arch_id(id);
662     } else {
663         cs = mon_get_cpu(mon);
664     }
665 
666 
667     if (!cs) {
668         monitor_printf(mon, "No CPU available\n");
669         return;
670     }
671     x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU);
672 }
673 
674 void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
675 {
676     monitor_printf(mon, "This command is obsolete and will be "
677                    "removed soon. Please use 'info pic' instead.\n");
678 }
679 
680 SevInfo *qmp_query_sev(Error **errp)
681 {
682     SevInfo *info;
683 
684     info = sev_get_info();
685     if (!info) {
686         error_setg(errp, "SEV feature is not available");
687         return NULL;
688     }
689 
690     return info;
691 }
692 
693 void hmp_info_sev(Monitor *mon, const QDict *qdict)
694 {
695     SevInfo *info = sev_get_info();
696 
697     if (info && info->enabled) {
698         monitor_printf(mon, "handle: %d\n", info->handle);
699         monitor_printf(mon, "state: %s\n", SevState_str(info->state));
700         monitor_printf(mon, "build: %d\n", info->build_id);
701         monitor_printf(mon, "api version: %d.%d\n",
702                        info->api_major, info->api_minor);
703         monitor_printf(mon, "debug: %s\n",
704                        info->policy & SEV_POLICY_NODBG ? "off" : "on");
705         monitor_printf(mon, "key-sharing: %s\n",
706                        info->policy & SEV_POLICY_NOKS ? "off" : "on");
707     } else {
708         monitor_printf(mon, "SEV is not enabled\n");
709     }
710 
711     qapi_free_SevInfo(info);
712 }
713 
714 SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
715 {
716     char *data;
717     SevLaunchMeasureInfo *info;
718 
719     data = sev_get_launch_measurement();
720     if (!data) {
721         error_setg(errp, "SEV launch measurement is not available");
722         return NULL;
723     }
724 
725     info = g_malloc0(sizeof(*info));
726     info->data = data;
727 
728     return info;
729 }
730 
/* QMP query-sev-capabilities: thin wrapper over sev_get_capabilities(). */
SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    return sev_get_capabilities(errp);
}
735 
/* GUID under which OVMF publishes the SEV launch-secret area. */
#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294"
/*
 * Layout of the OVMF table entry found under SEV_SECRET_GUID: the
 * guest-physical base address and size of the secret area.
 */
struct sev_secret_area {
    uint32_t base;
    uint32_t size;
};
741 
742 void qmp_sev_inject_launch_secret(const char *packet_hdr,
743                                   const char *secret,
744                                   bool has_gpa, uint64_t gpa,
745                                   Error **errp)
746 {
747     if (!sev_enabled()) {
748         error_setg(errp, "SEV not enabled for guest");
749         return;
750     }
751     if (!has_gpa) {
752         uint8_t *data;
753         struct sev_secret_area *area;
754 
755         if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) {
756             error_setg(errp, "SEV: no secret area found in OVMF,"
757                        " gpa must be specified.");
758             return;
759         }
760         area = (struct sev_secret_area *)data;
761         gpa = area->base;
762     }
763 
764     sev_inject_launch_secret(packet_hdr, secret, gpa, errp);
765 }
766 
/*
 * QMP query-sev-attestation-report: thin wrapper returning the SEV
 * attestation report for the given @mnonce.
 */
SevAttestationReport *
qmp_query_sev_attestation_report(const char *mnonce, Error **errp)
{
    return sev_get_attestation_report(mnonce, errp);
}
772 
/* QMP query-sgx: thin wrapper over sgx_get_info(). */
SGXInfo *qmp_query_sgx(Error **errp)
{
    return sgx_get_info(errp);
}
777 
778 void hmp_info_sgx(Monitor *mon, const QDict *qdict)
779 {
780     Error *err = NULL;
781     g_autoptr(SGXInfo) info = qmp_query_sgx(&err);
782 
783     if (err) {
784         error_report_err(err);
785         return;
786     }
787     monitor_printf(mon, "SGX support: %s\n",
788                    info->sgx ? "enabled" : "disabled");
789     monitor_printf(mon, "SGX1 support: %s\n",
790                    info->sgx1 ? "enabled" : "disabled");
791     monitor_printf(mon, "SGX2 support: %s\n",
792                    info->sgx2 ? "enabled" : "disabled");
793     monitor_printf(mon, "FLC support: %s\n",
794                    info->flc ? "enabled" : "disabled");
795     monitor_printf(mon, "size: %" PRIu64 "\n",
796                    info->section_size);
797 }
798 
/* QMP query-sgx-capabilities: thin wrapper over sgx_get_capabilities(). */
SGXInfo *qmp_query_sgx_capabilities(Error **errp)
{
    return sgx_get_capabilities(errp);
}
803