// SPDX-License-Identifier: GPL-2.0
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#include <linux/bitfield.h>

#define MDSCR_KDE	(1 << 13)
#define MDSCR_MDE	(1 << 15)
#define MDSCR_SS	(1 << 0)

#define DBGBCR_LEN8	(0xff << 5)
#define DBGBCR_EXEC	(0x0 << 3)
#define DBGBCR_EL1	(0x1 << 1)
#define DBGBCR_E	(0x1 << 0)
#define DBGBCR_LBN_SHIFT	16
#define DBGBCR_BT_SHIFT		20
#define DBGBCR_BT_ADDR_LINK_CTX	(0x1 << DBGBCR_BT_SHIFT)
#define DBGBCR_BT_CTX_LINK	(0x3 << DBGBCR_BT_SHIFT)

#define DBGWCR_LEN8	(0xff << 5)
#define DBGWCR_RD	(0x1 << 3)
#define DBGWCR_WR	(0x2 << 3)
#define DBGWCR_EL1	(0x1 << 1)
#define DBGWCR_E	(0x1 << 0)
#define DBGWCR_LBN_SHIFT	16
#define DBGWCR_WT_SHIFT		20
#define DBGWCR_WT_LINK		(0x1 << DBGWCR_WT_SHIFT)

#define SPSR_D		(1 << 9)
#define SPSR_SS		(1 << 21)

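/*
 * Labels defined in the guest_code*() asm blocks below, along with the
 * volatile globals the guest exception handlers use to report the PC (or
 * faulting data address) at which each debug exception was taken.
 */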
extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
extern unsigned char iter_ss_begin, iter_ss_end;
static volatile uint64_t sw_bp_addr, hw_bp_addr;
static volatile uint64_t wp_addr, wp_data_addr;
static volatile uint64_t svc_addr;
static volatile uint64_t ss_addr[4], ss_idx;
#define PC(v)  ((uint64_t)&(v))

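/*
 * write_sysreg() needs the register name at compile time, so generate one
 * helper per debug register type that switches on the breakpoint/watchpoint
 * index at runtime.
 */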
#define GEN_DEBUG_WRITE_REG(reg_name)				\
static void write_##reg_name(int num, uint64_t val)		\
{								\
	switch (num) {						\
	case 0:							\
		write_sysreg(val, reg_name##0_el1);		\
		break;						\
	case 1:							\
		write_sysreg(val, reg_name##1_el1);		\
		break;						\
	case 2:							\
		write_sysreg(val, reg_name##2_el1);		\
		break;						\
	case 3:							\
		write_sysreg(val, reg_name##3_el1);		\
		break;						\
	case 4:							\
		write_sysreg(val, reg_name##4_el1);		\
		break;						\
	case 5:							\
		write_sysreg(val, reg_name##5_el1);		\
		break;						\
	case 6:							\
		write_sysreg(val, reg_name##6_el1);		\
		break;						\
	case 7:							\
		write_sysreg(val, reg_name##7_el1);		\
		break;						\
	case 8:							\
		write_sysreg(val, reg_name##8_el1);		\
		break;						\
	case 9:							\
		write_sysreg(val, reg_name##9_el1);		\
		break;						\
	case 10:						\
		write_sysreg(val, reg_name##10_el1);		\
		break;						\
	case 11:						\
		write_sysreg(val, reg_name##11_el1);		\
		break;						\
	case 12:						\
		write_sysreg(val, reg_name##12_el1);		\
		break;						\
	case 13:						\
		write_sysreg(val, reg_name##13_el1);		\
		break;						\
	case 14:						\
		write_sysreg(val, reg_name##14_el1);		\
		break;						\
	case 15:						\
		write_sysreg(val, reg_name##15_el1);		\
		break;						\
	default:						\
		GUEST_ASSERT(0);				\
	}							\
}

/* Define write_dbgbcr()/write_dbgbvr()/write_dbgwcr()/write_dbgwvr() */
GEN_DEBUG_WRITE_REG(dbgbcr)
GEN_DEBUG_WRITE_REG(dbgbvr)
GEN_DEBUG_WRITE_REG(dbgwcr)
GEN_DEBUG_WRITE_REG(dbgwvr)

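/*
 * Mask debug exceptions, clear the OS (Double) Lock, MDSCR_EL1 and
 * CONTEXTIDR_EL1, and zero every implemented breakpoint/watchpoint
 * control/value register so each test starts from a clean state.
 */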
static void reset_debug_state(void)
{
	uint8_t brps, wrps, i;
	uint64_t dfr0;

	asm volatile("msr daifset, #8");

	write_sysreg(0, osdlr_el1);
	write_sysreg(0, oslar_el1);
	isb();

	write_sysreg(0, mdscr_el1);
	write_sysreg(0, contextidr_el1);

	/* Reset all bcr/bvr/wcr/wvr registers */
	dfr0 = read_sysreg(id_aa64dfr0_el1);
	brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), dfr0);
	for (i = 0; i <= brps; i++) {
		write_dbgbcr(i, 0);
		write_dbgbvr(i, 0);
	}
	wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), dfr0);
	for (i = 0; i <= wrps; i++) {
		write_dbgwcr(i, 0);
		write_dbgwvr(i, 0);
	}

	isb();
}

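/*
 * Set the OS Lock via OSLAR_EL1 and confirm it took effect through
 * OSLSR_EL1.OSLK.  While the OS Lock is set, hardware breakpoint,
 * watchpoint and software-step exceptions are suppressed; BRK is not,
 * as the test cases below demonstrate.
 */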
static void enable_os_lock(void)
{
	write_sysreg(1, oslar_el1);
	isb();

	GUEST_ASSERT(read_sysreg(oslsr_el1) & 2);
}

static void enable_monitor_debug_exceptions(void)
{
	uint32_t mdscr;

	asm volatile("msr daifclr, #8");

	mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
	write_sysreg(mdscr, mdscr_el1);
	isb();
}

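/*
 * Program watchpoint <wpn> to trap both loads and stores to the 8 bytes at
 * <addr> from EL1, then enable the monitor debug exceptions that deliver it.
 */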
static void install_wp(uint8_t wpn, uint64_t addr)
{
	uint32_t wcr;

	wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
	write_dbgwcr(wpn, wcr);
	write_dbgwvr(wpn, addr);

	isb();

	enable_monitor_debug_exceptions();
}

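/* Program breakpoint <bpn> for an unlinked instruction address match on <addr>. */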
static void install_hw_bp(uint8_t bpn, uint64_t addr)
{
	uint32_t bcr;

	bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
	write_dbgbcr(bpn, bcr);
	write_dbgbvr(bpn, addr);
	isb();

	enable_monitor_debug_exceptions();
}

static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
			   uint64_t ctx)
{
	uint32_t wcr;
	uint64_t ctx_bcr;

	/* Setup a context-aware breakpoint for Linked Context ID Match */
	ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
		  DBGBCR_BT_CTX_LINK;
	write_dbgbcr(ctx_bp, ctx_bcr);
	write_dbgbvr(ctx_bp, ctx);

	/* Setup a linked watchpoint (linked to the context-aware breakpoint) */
	wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
	      DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
	write_dbgwcr(addr_wp, wcr);
	write_dbgwvr(addr_wp, addr);
	isb();

	enable_monitor_debug_exceptions();
}

void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
		       uint64_t ctx)
{
	uint32_t addr_bcr, ctx_bcr;

	/* Setup a context-aware breakpoint for Linked Context ID Match */
	ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
		  DBGBCR_BT_CTX_LINK;
	write_dbgbcr(ctx_bp, ctx_bcr);
	write_dbgbvr(ctx_bp, ctx);

	/*
	 * Setup a normal breakpoint for Linked Address Match, and link it
	 * to the context-aware breakpoint.
	 */
	addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
		   DBGBCR_BT_ADDR_LINK_CTX |
		   ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
	write_dbgbcr(addr_bp, addr_bcr);
	write_dbgbvr(addr_bp, addr);
	isb();

	enable_monitor_debug_exceptions();
}

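/*
 * Enable software-step: set MDSCR_EL1.{KDE,SS} and unmask debug exceptions.
 * A step exception is then taken before each instruction; guest_ss_handler()
 * re-arms the step by setting the saved PSTATE.SS so that exactly one
 * instruction retires between exceptions.
 */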
static void install_ss(void)
{
	uint32_t mdscr;

	asm volatile("msr daifclr, #8");

	mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
	write_sysreg(mdscr, mdscr_el1);
	isb();
}

static volatile char write_data;

static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
{
	uint64_t ctx = 0xabcdef;	/* a random context number */

	/* Software-breakpoint */
	reset_debug_state();
	asm volatile("sw_bp: brk #0");
	GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));

	/* Hardware-breakpoint */
	reset_debug_state();
	install_hw_bp(bpn, PC(hw_bp));
	asm volatile("hw_bp: nop");
	GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));

	/* Hardware-breakpoint + svc */
	reset_debug_state();
	install_hw_bp(bpn, PC(bp_svc));
	asm volatile("bp_svc: svc #0");
	GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
	GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4);

	/* Hardware-breakpoint + software-breakpoint */
	reset_debug_state();
	install_hw_bp(bpn, PC(bp_brk));
	asm volatile("bp_brk: brk #0");
	GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
	GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));

	/* Watchpoint */
	reset_debug_state();
	install_wp(wpn, PC(write_data));
	write_data = 'x';
	GUEST_ASSERT_EQ(write_data, 'x');
	GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));

	/* Single-step */
	reset_debug_state();
	install_ss();
	ss_idx = 0;
	asm volatile("ss_start:\n"
		     "mrs x0, esr_el1\n"
		     "add x0, x0, #1\n"
		     "msr daifset, #8\n"
		     : : : "x0");
	GUEST_ASSERT_EQ(ss_addr[0], PC(ss_start));
	GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
	GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);

	/* OS Lock does not block software-breakpoint */
	reset_debug_state();
	enable_os_lock();
	sw_bp_addr = 0;
	asm volatile("sw_bp2: brk #0");
	GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2));

	/* OS Lock blocking hardware-breakpoint */
	reset_debug_state();
	enable_os_lock();
	install_hw_bp(bpn, PC(hw_bp2));
	hw_bp_addr = 0;
	asm volatile("hw_bp2: nop");
	GUEST_ASSERT_EQ(hw_bp_addr, 0);

	/* OS Lock blocking watchpoint */
	reset_debug_state();
	enable_os_lock();
	write_data = '\0';
	wp_data_addr = 0;
	install_wp(wpn, PC(write_data));
	write_data = 'x';
	GUEST_ASSERT_EQ(write_data, 'x');
	GUEST_ASSERT_EQ(wp_data_addr, 0);

	/* OS Lock blocking single-step */
	reset_debug_state();
	enable_os_lock();
	ss_addr[0] = 0;
	install_ss();
	ss_idx = 0;
	asm volatile("mrs x0, esr_el1\n\t"
		     "add x0, x0, #1\n\t"
		     "msr daifset, #8\n\t"
		     : : : "x0");
	GUEST_ASSERT_EQ(ss_addr[0], 0);

	/* Linked hardware-breakpoint */
	hw_bp_addr = 0;
	reset_debug_state();
	install_hw_bp_ctx(bpn, ctx_bpn, PC(hw_bp_ctx), ctx);
	/* Set context id */
	write_sysreg(ctx, contextidr_el1);
	isb();
	asm volatile("hw_bp_ctx: nop");
	write_sysreg(0, contextidr_el1);
	GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp_ctx));

	/* Linked watchpoint */
	reset_debug_state();
	install_wp_ctx(wpn, ctx_bpn, PC(write_data), ctx);
	/* Set context id */
	write_sysreg(ctx, contextidr_el1);
	isb();
	write_data = 'x';
	GUEST_ASSERT_EQ(write_data, 'x');
	GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));

	GUEST_DONE();
}

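/* BRK does not advance the PC, so record it and manually step over it. */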
static void guest_sw_bp_handler(struct ex_regs *regs)
{
	sw_bp_addr = regs->pc;
	regs->pc += 4;
}

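/*
 * Record the breakpoint hit and mask debug exceptions (PSTATE.D) on return
 * so the same breakpoint does not immediately re-fire.
 */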
static void guest_hw_bp_handler(struct ex_regs *regs)
{
	hw_bp_addr = regs->pc;
	regs->pstate |= SPSR_D;
}

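/* FAR_EL1 holds the data address that triggered the watchpoint. */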
static void guest_wp_handler(struct ex_regs *regs)
{
	wp_data_addr = read_sysreg(far_el1);
	wp_addr = regs->pc;
	regs->pstate |= SPSR_D;
}

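/*
 * Log the stepped PC and set the saved PSTATE.SS so the next instruction
 * completes before another step exception is taken.
 */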
static void guest_ss_handler(struct ex_regs *regs)
{
	__GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%u'", ss_idx);
	ss_addr[ss_idx++] = regs->pc;
	regs->pstate |= SPSR_SS;
}

static void guest_svc_handler(struct ex_regs *regs)
{
	svc_addr = regs->pc;
}

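/*
 * Guest side of the userspace single-step test: each iteration writes and
 * reads back dbgbvr0_el1/dbgwvr0_el1 while userspace single-steps the code
 * between iter_ss_begin and iter_ss_end.
 */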
static void guest_code_ss(int test_cnt)
{
	uint64_t i;
	uint64_t bvr, wvr, w_bvr, w_wvr;

	for (i = 0; i < test_cnt; i++) {
		/* Bits [1:0] of dbg{b,w}vr are RES0 */
		w_bvr = i << 2;
		w_wvr = i << 2;

		/*
		 * Enable Single Step execution. Note! This _must_ be a bare
		 * ucall as the ucall() path uses atomic operations to manage
		 * the ucall structures, and the built-in "atomics" are usually
		 * implemented via exclusive access instructions. The exclusive
		 * monitor is cleared on ERET, and so taking debug exceptions
		 * during a LDREX=>STREX sequence will prevent forward progress
		 * and hang the guest/test.
		 */
		GUEST_UCALL_NONE();

		/*
		 * The userspace will verify that the pc is as expected during
		 * single step execution between iter_ss_begin and iter_ss_end.
		 */
		asm volatile("iter_ss_begin:nop\n");

		write_sysreg(w_bvr, dbgbvr0_el1);
		write_sysreg(w_wvr, dbgwvr0_el1);
		bvr = read_sysreg(dbgbvr0_el1);
		wvr = read_sysreg(dbgwvr0_el1);

		/* Userspace disables Single Step when the end is nigh. */
		asm volatile("iter_ss_end:\n");

		GUEST_ASSERT_EQ(bvr, w_bvr);
		GUEST_ASSERT_EQ(wvr, w_wvr);
	}
	GUEST_DONE();
}

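/* Extract the DebugVer field from ID_AA64DFR0_EL1. */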
static int debug_version(uint64_t id_aa64dfr0)
{
	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), id_aa64dfr0);
}

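/*
 * Run guest_code() with the given breakpoint, watchpoint and context-aware
 * breakpoint numbers, with handlers installed for all of the debug
 * exceptions the guest triggers.
 */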
static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_BRK_INS, guest_sw_bp_handler);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_HW_BP_CURRENT, guest_hw_bp_handler);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_WP_CURRENT, guest_wp_handler);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_SSTEP_CURRENT, guest_ss_handler);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_SVC64, guest_svc_handler);

	/* Specify bpn/wpn/ctx_bpn to be tested */
	vcpu_args_set(vcpu, 3, bpn, wpn, ctx_bpn);
	pr_debug("Use bpn#%d, wpn#%d and ctx_bpn#%d\n", bpn, wpn, ctx_bpn);

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
		goto done;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
	}

done:
	kvm_vm_free(vm);
}

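/*
 * Toggle KVM_GUESTDBG_SINGLESTEP from userspace and verify that, between
 * iter_ss_begin and iter_ss_end, each KVM_EXIT_DEBUG reports a PC exactly
 * one instruction past the previous one.
 */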
void test_single_step_from_userspace(int test_cnt)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	struct kvm_run *run;
	uint64_t pc, cmd;
	uint64_t test_pc = 0;
	bool ss_enable = false;
	struct kvm_guest_debug debug = {};

	vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
	run = vcpu->run;
	vcpu_args_set(vcpu, 1, test_cnt);

	while (1) {
		vcpu_run(vcpu);
		if (run->exit_reason != KVM_EXIT_DEBUG) {
			cmd = get_ucall(vcpu, &uc);
			if (cmd == UCALL_ABORT) {
				REPORT_GUEST_ASSERT(uc);
				/* NOT REACHED */
			} else if (cmd == UCALL_DONE) {
				break;
			}

			TEST_ASSERT(cmd == UCALL_NONE,
				    "Unexpected ucall cmd 0x%lx", cmd);

			debug.control = KVM_GUESTDBG_ENABLE |
					KVM_GUESTDBG_SINGLESTEP;
			ss_enable = true;
			vcpu_guest_debug_set(vcpu, &debug);
			continue;
		}

		TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");

		/* Check if the current pc is expected. */
		vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
		TEST_ASSERT(!test_pc || pc == test_pc,
			    "Unexpected pc 0x%lx (expected 0x%lx)",
			    pc, test_pc);

		if ((pc + 4) == (uint64_t)&iter_ss_end) {
			test_pc = 0;
			debug.control = KVM_GUESTDBG_ENABLE;
			ss_enable = false;
			vcpu_guest_debug_set(vcpu, &debug);
			continue;
		}

		/*
		 * If the current pc is between iter_ss_begin and
		 * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
		 * be the current pc + 4.
		 */
		if ((pc >= (uint64_t)&iter_ss_begin) &&
		    (pc < (uint64_t)&iter_ss_end))
			test_pc = pc + 4;
		else
			test_pc = 0;
	}

	kvm_vm_free(vm);
}

/*
 * Run debug testing using the various breakpoint#, watchpoint# and
 * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
 */
void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
{
	uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
	int b, w, c;

	/* Number of breakpoints */
	brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), aa64dfr0) + 1;
	__TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");

	/* Number of watchpoints */
	wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), aa64dfr0) + 1;

	/* Number of context aware breakpoints */
	ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_CTX_CMPS), aa64dfr0) + 1;

	pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
		 brp_num, wrp_num, ctx_brp_num);

	/* Number of normal (non-context aware) breakpoints */
	normal_brp_num = brp_num - ctx_brp_num;

	/* Lowest context aware breakpoint number */
	ctx_brp_base = normal_brp_num;

	/* Run tests with all supported breakpoints/watchpoints */
	for (c = ctx_brp_base; c < ctx_brp_base + ctx_brp_num; c++) {
		for (b = 0; b < normal_brp_num; b++) {
			for (w = 0; w < wrp_num; w++)
				test_guest_debug_exceptions(b, w, c);
		}
	}
}

static void help(char *name)
{
	puts("");
	printf("Usage: %s [-h] [-i iterations of the single step test]\n", name);
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int opt;
	int ss_iteration = 10000;
	uint64_t aa64dfr0;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0);
	__TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
		       "Armv8 debug architecture not supported.");
	kvm_vm_free(vm);

	while ((opt = getopt(argc, argv, "i:")) != -1) {
		switch (opt) {
		case 'i':
			ss_iteration = atoi_positive("Number of iterations", optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	test_guest_debug_exceptions_all(aa64dfr0);
	test_single_step_from_userspace(ss_iteration);

	return 0;
}