/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
24
25 #include "qemu/osdep.h"
26 #include "cpu.h"
27 #include "monitor/monitor.h"
28 #include "monitor/hmp-target.h"
29 #include "monitor/hmp.h"
30 #include "qapi/qmp/qdict.h"
31 #include "qapi/error.h"
32 #include "qapi/qapi-commands-misc-target.h"
33 #include "qapi/qapi-commands-misc.h"
34
35 /* Perform linear address sign extension */
addr_canonical(CPUArchState * env,hwaddr addr)36 static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
37 {
38 #ifdef TARGET_X86_64
39 if (env->cr[4] & CR4_LA57_MASK) {
40 if (addr & (1ULL << 56)) {
41 addr |= (hwaddr)-(1LL << 57);
42 }
43 } else {
44 if (addr & (1ULL << 47)) {
45 addr |= (hwaddr)-(1LL << 48);
46 }
47 }
48 #endif
49 return addr;
50 }
51
/* Print one page-table entry: canonical VA, masked PA, and flag letters. */
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    /* Sign-extend the virtual address before printing. */
    hwaddr vaddr = addr_canonical(env, addr);

    monitor_printf(mon, HWADDR_FMT_plx ": " HWADDR_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   vaddr, pte & mask,
                   (pte & PG_NX_MASK) ? 'X' : '-',
                   (pte & PG_GLOBAL_MASK) ? 'G' : '-',
                   (pte & PG_PSE_MASK) ? 'P' : '-',
                   (pte & PG_DIRTY_MASK) ? 'D' : '-',
                   (pte & PG_ACCESSED_MASK) ? 'A' : '-',
                   (pte & PG_PCD_MASK) ? 'C' : '-',
                   (pte & PG_PWT_MASK) ? 'T' : '-',
                   (pte & PG_USER_MASK) ? 'U' : '-',
                   (pte & PG_RW_MASK) ? 'W' : '-');
}
71
/* Dump all present mappings for legacy 32-bit (non-PAE) paging. */
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (!(pde & PG_PRESENT_MASK)) {
            continue;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4M pages */
            print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            continue;
        }
        for (l2 = 0; l2 < 1024; l2++) {
            cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
            pte = le32_to_cpu(pte);
            if (pte & PG_PRESENT_MASK) {
                print_pte(mon, env, (l1 << 22) + (l2 << 12),
                          pte & ~PG_PSE_MASK, ~0xfff);
            }
        }
    }
}
99
/* Dump all present mappings for 32-bit PAE paging (3-level walk). */
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (!(pdpe & PG_PRESENT_MASK)) {
            continue;
        }
        pd_addr = pdpe & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
            pde = le64_to_cpu(pde);
            if (!(pde & PG_PRESENT_MASK)) {
                continue;
            }
            if (pde & PG_PSE_MASK) {
                /* 2M pages with PAE, CR4.PSE is ignored */
                print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                          ~((hwaddr)(1 << 20) - 1));
                continue;
            }
            pt_addr = pde & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                pte = le64_to_cpu(pte);
                if (pte & PG_PRESENT_MASK) {
                    print_pte(mon, env,
                              (l1 << 30) + (l2 << 21) + (l3 << 12),
                              pte & ~PG_PSE_MASK, ~(hwaddr)0xfff);
                }
            }
        }
    }
}
138
139 #ifdef TARGET_X86_64
/*
 * Dump all present mappings under one PML4 table (4-level long mode).
 * l0 is the PML5 index when called from tlb_info_la57(), 0 otherwise.
 */
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
                          uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                          pdpe, 0x3ffffc0000000ULL);
            } else {
                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    if (!(pde & PG_PRESENT_MASK)) {
                        continue;
                    }
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages, CR4.PSE is ignored */
                        print_pte(mon, env,
                                  (l0 << 48) + (l1 << 39) + (l2 << 30) +
                                  (l3 << 21), pde, 0x3ffffffe00000ULL);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l4 = 0; l4 < 512; l4++) {
                            cpu_physical_memory_read(pt_addr + l4 * 8,
                                                     &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env,
                                          (l0 << 48) + (l1 << 39) +
                                          (l2 << 30) + (l3 << 21) +
                                          (l4 << 12),
                                          pte & ~PG_PSE_MASK,
                                          0x3fffffffff000ULL);
                            }
                        }
                    }
                }
            }
        }
    }
}
200
/* 5-level (LA57) walk: iterate the PML5 and descend via tlb_info_la48(). */
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr = env->cr[3] & 0x3fffffffff000ULL;

    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (!(pml5e & PG_PRESENT_MASK)) {
            continue;
        }
        tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
    }
}
216 #endif /* TARGET_X86_64 */
217
/*
 * HMP 'info tlb': dump the present page-table mappings of the monitor's
 * current CPU, dispatching on the active paging mode (32-bit, PAE,
 * 4-level long mode, or 5-level LA57 long mode).
 */
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    /* Without CR0.PG there are no page tables to walk. */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                /* 4-level walk starts at CR3; PML5 index fixed to 0. */
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}
249
/*
 * Range merger for the 'info mem' walkers: while the protection stays
 * the same the open range [*pstart, ...) is extended; on a change the
 * finished range is printed and a new one is opened at 'end' (or closed,
 * *pstart = -1, when the new protection is 0).
 */
static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int last = *plast_prot;

    if (prot == last) {
        return;
    }
    if (*pstart != -1) {
        monitor_printf(mon, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
                       HWADDR_FMT_plx " %c%c%c\n",
                       addr_canonical(env, *pstart),
                       addr_canonical(env, end),
                       addr_canonical(env, end - *pstart),
                       (last & PG_USER_MASK) ? 'u' : '-',
                       'r',
                       (last & PG_RW_MASK) ? 'w' : '-');
    }
    *pstart = (prot != 0) ? end : (hwaddr)-1;
    *plast_prot = prot;
}
274
/* 'info mem' walker for legacy 32-bit paging: merge and print ranges. */
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (!(pde & PG_PRESENT_MASK)) {
            mem_print(mon, env, &start, &last_prot, end, 0);
            continue;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4M page: protection comes from the PDE alone. */
            prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }
        for (l2 = 0; l2 < 1024; l2++) {
            cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
            pte = le32_to_cpu(pte);
            end = (l1 << 22) + (l2 << 12);
            /* Effective protection is the AND of PDE and PTE bits. */
            prot = (pte & PG_PRESENT_MASK)
                 ? (pte & pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK))
                 : 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
315
/* 'info mem' walker for 32-bit PAE paging: merge and print ranges. */
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (!(pdpe & PG_PRESENT_MASK)) {
            mem_print(mon, env, &start, &last_prot, end, 0);
            continue;
        }
        pd_addr = pdpe & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
            pde = le64_to_cpu(pde);
            end = (l1 << 30) + (l2 << 21);
            if (!(pde & PG_PRESENT_MASK)) {
                mem_print(mon, env, &start, &last_prot, end, 0);
                continue;
            }
            if (pde & PG_PSE_MASK) {
                /* 2M page: protection taken from the PDE. */
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }
            pt_addr = pde & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                pte = le64_to_cpu(pte);
                end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                /* Effective protection is the AND of PDE and PTE bits. */
                prot = (pte & PG_PRESENT_MASK)
                     ? (pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                     PG_PRESENT_MASK))
                     : 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
370
371
372 #ifdef TARGET_X86_64
/*
 * 'info mem' walker for 4-level (48-bit) long mode.  The effective
 * protection of a mapping is the AND of the U/W/P bits of every
 * paging-structure entry on the walk; adjacent ranges with identical
 * protection are merged by mem_print().
 */
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;  /* -1: no range currently open */
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1G page: protection from PML4E & PDPE */
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2M page: AND in the PDE bits */
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            /* 4K page: AND of all levels */
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}
452
/*
 * 'info mem' walker for 5-level (57-bit, LA57) long mode.  The
 * effective protection of a mapping is the AND of the U/W/P bits of
 * every paging-structure entry on the walk; adjacent ranges with
 * identical protection are merged by mem_print().
 */
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;  /* -1: no range currently open */
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                /*
                 * Fix: the present check was inverted (missing '!'),
                 * which treated present PDPEs as unmapped and walked
                 * the contents of non-present entries.  Compare the
                 * PML5E/PML4E checks above and tlb_info_la48().
                 */
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    /* 1G page: AND the PML5E and PML4E bits in */
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                   PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    /* Fix: this present check was inverted as well. */
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        /* 2M page: AND the upper-level bits in */
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            /* 4K page: AND of all five levels */
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                          PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
544 #endif /* TARGET_X86_64 */
545
/*
 * HMP 'info mem': print the mapped virtual address ranges of the
 * monitor's current CPU with their user/write protection bits,
 * dispatching on the active paging mode.
 */
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    /* Nothing to walk when CR0.PG is clear. */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}
577
/*
 * HMP 'mce': inject a machine-check event into the selected vCPU,
 * taking bank/status/mcg_status/addr/misc from the command arguments.
 */
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    int flags = MCE_INJECT_UNCOND_AO;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    CPUState *cs;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs == NULL) {
        /* Unknown cpu_index: nothing to inject. */
        return;
    }
    cpu_x86_inject_mce(mon, X86_CPU(cs), bank, status, mcg_status, addr,
                       misc, flags);
}
600
monitor_get_pc(Monitor * mon,const struct MonitorDef * md,int val)601 static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
602 int val)
603 {
604 CPUArchState *env = mon_get_cpu_env(mon);
605 return env->eip + env->segs[R_CS].base;
606 }
607
/*
 * Register-name table consumed via target_monitor_defs() below.
 * Entries with a NULL getter are read from CPUX86State at the recorded
 * offset; MD_I32 presumably marks 32-bit-wide fields -- confirm against
 * monitor/hmp-target.h.  The table is NULL-name terminated.
 */
const MonitorDef monitor_defs[] = {
/* Expands to selector/base/limit entries for one segment register. */
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    /* "pc" is computed by monitor_get_pc() (CS base + EIP). */
    { "pc", 0, monitor_get_pc, },
    { NULL },  /* terminator */
};
643
/* Return the x86 register table defined above. */
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}
648