/* Source: /openbmc/qemu/target/i386/monitor.c (revision 4c44a007) */
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "hmp.h"


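/*
 * Print one page table entry: the virtual address it maps (sign-extended
 * to canonical form for 48-bit or 57-bit linear addresses in long mode),
 * the physical address selected by @mask, and the NX/G/PSE/D/A/PCD/PWT/
 * U/W attribute bits.
 */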
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= -1LL << 57;
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= -1LL << 48;
        }
    }
#endif
    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

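/*
 * "info tlb" walker for legacy 32-bit (non-PAE) paging: dump every present
 * mapping in the two-level page table rooted at CR3, including 4 MiB pages
 * when CR4.PSE is enabled.
 */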
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

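/*
 * "info tlb" walker for 32-bit PAE paging: 4-entry PDPT, then 512-entry
 * page directories and page tables, including 2 MiB pages.
 */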
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
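/*
 * "info tlb" walker for one 4-level (48-bit) hierarchy rooted at @pml4_addr.
 * @l0 is the PML5 index when called from tlb_info_la57(), or 0 when CR3
 * points directly at a PML4.
 */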
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
        uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                        pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                (l2 << 30) + (l3 << 21) + (l4 << 12),
                                pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

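/*
 * "info tlb" walker for 5-level (57-bit, LA57) paging: iterate over the
 * PML5 rooted at CR3 and hand each present entry to tlb_info_la48().
 */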
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

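/*
 * HMP "info tlb": dump the guest page tables of the current monitor CPU,
 * using the walker that matches the active paging mode.
 */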
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

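/*
 * Helper for "info mem": coalesce consecutive pages with identical
 * protection into ranges.  When the protection changes, print the range
 * that just ended ([*pstart, end), its length and u/r/w flags) and start
 * a new one; *pstart == -1 means no range is open.
 */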
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0) {
            *pstart = end;
        } else {
            *pstart = -1;
        }
        *plast_prot = prot;
    }
}

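/*
 * "info mem" walker for legacy 32-bit (non-PAE) paging: report mapped
 * virtual ranges with their effective protection, folding PDE and PTE
 * permission bits together.
 */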
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

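/*
 * "info mem" walker for 32-bit PAE paging, including 2 MiB pages.
 */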
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}


#ifdef TARGET_X86_64
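/*
 * "info mem" walker for 4-level (48-bit) long-mode paging.  The effective
 * protection of a mapping is the AND of the user/write bits at every
 * level of the walk.
 */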
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}

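/*
 * "info mem" walker for 5-level (57-bit, LA57) long-mode paging: as
 * mem_info_la48(), with an extra PML5 level on top.
 */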
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                /* Close any open range at a non-present entry */
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                            PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    /* Close any open range at a non-present entry */
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                    PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

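/*
 * HMP "info mem": show the mapped guest virtual address ranges and their
 * effective user/read/write protection for the current paging mode.
 */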
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

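/*
 * HMP "mce": inject a machine check exception into the selected vCPU's
 * MCE bank, using the given MCi_STATUS/MCG_STATUS/ADDR/MISC values;
 * "broadcast" raises it on all vCPUs.
 */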
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

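/* Pseudo-register "pc" for monitor expressions: CS base + EIP. */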
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

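/*
 * Register names recognized in monitor expressions (e.g. $pc, $cs.base),
 * mapped to their offsets in CPUX86State.
 */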
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

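/* Hand the x86 register table above to the common monitor code. */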
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

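/*
 * HMP "info lapic": dump the local APIC state of the CPU selected by
 * "apic-id", or of the current monitor CPU when no id is given.
 */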
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu();
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

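/*
 * HMP "info ioapic": dump IOAPIC state, asking KVM when the IOAPIC is
 * emulated by the full in-kernel irqchip (not split irqchip).
 */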
void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    if (kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        kvm_ioapic_dump_state(mon, qdict);
    } else {
        ioapic_dump_state(mon, qdict);
    }
}