xref: /openbmc/qemu/target/i386/monitor.c (revision 9d8ad11429fed6c54dcc7e0018dcb494927e3440)
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "qapi/qmp/qdict.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "hmp.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"

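/*
 * Print one page table entry: the (sign-extended) virtual address it maps,
 * the physical address selected by 'mask', and its decoded flag bits.
 */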
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= -1LL << 57;
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= -1LL << 48;
        }
    }
#endif
    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

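/* Print every present mapping of a legacy two-level (non-PAE) page table. */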
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

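/* Print every present mapping of a 32-bit PAE (three-level) page table. */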
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
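/*
 * Print every present mapping of a 4-level (48-bit) page table rooted at
 * pml4_addr; 'l0' is the PML5 index when called from tlb_info_la57().
 */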
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
        uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                        pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                (l2 << 30) + (l3 << 21) + (l4 << 12),
                                pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

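/* Print a 5-level (LA57) walk: one 4-level walk per present PML5 entry. */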
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

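/* HMP "info tlb": dump the page tables for the current paging mode. */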
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

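/*
 * Merge contiguous ranges with identical protection and print one
 * "start-end size u r w" line each time the protection changes.
 */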
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0)
            *pstart = end;
        else
            *pstart = -1;
        *plast_prot = prot;
    }
}

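/* Summarize virtual memory protections for a legacy two-level page table. */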
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

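/* Summarize virtual memory protections for a 32-bit PAE page table. */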
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}


#ifdef TARGET_X86_64
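/* Summarize virtual memory protections for a 4-level (48-bit) page table. */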
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}

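/* Summarize virtual memory protections for a 5-level (LA57) page table. */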
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                            PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                    PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

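/* HMP "info mem": summarize virtual memory mappings and their protections. */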
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

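/* HMP "mce": inject a machine check exception into the selected vCPU. */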
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

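/* Helper for the 'pc' pseudo-register: linear address of CS:EIP. */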
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

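/* Register names understood by the HMP expression evaluator (e.g. $pc). */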
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

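/* HMP "info lapic": dump the local APIC state of one CPU. */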
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu();
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

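/* HMP "info ioapic": dump IOAPIC state, from KVM when it owns the chip. */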
void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    if (kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        kvm_ioapic_dump_state(mon, qdict);
    } else {
        ioapic_dump_state(mon, qdict);
    }
}

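/* QMP query-sev: always reports that SEV is not available. */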
SevInfo *qmp_query_sev(Error **errp)
{
    error_setg(errp, "SEV feature is not available");
    return NULL;
}