/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"


/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

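    /*
     * HCR_EL2.TGE routes all exceptions that would go to EL1 to EL2
     * instead, and MDCR_EL2.TDE routes debug exceptions specifically;
     * either one sends debug exceptions to EL2.
     */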
    if (arm_is_el2_enabled(env)) {
        route_to_el2 = (env->cp15.hcr_el2 & HCR_TGE) ||
                       (env->cp15.mdcr_el2 & MDCR_TDE);
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

/*
 * Raise an exception to the debug target EL.
 * Modify the syndrome to indicate when origin and target EL are the same.
 */
G_NORETURN static void
raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /*
     * If singlestep is targeting a lower EL than the current one, then
     * DisasContext.ss_active must be false and we can never get here.
     * Similarly for watchpoint and breakpoint matches.
     */
    assert(debug_el >= cur_el);
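    /*
     * The "same EL" syndrome encodings for debug exceptions are one
     * greater than their "lower EL" counterparts (e.g. EC_BREAKPOINT
     * 0x30 vs EC_BREAKPOINT_SAME_EL 0x31, EC_WATCHPOINT 0x34 vs
     * EC_WATCHPOINT_SAME_EL 0x35), so OR-ing the comparison result
     * into the low bit of the EC field converts one to the other.
     */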
    syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT;
    raise_exception(env, excp, syndrome, debug_el);
}

/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
static bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    int debug_el;

    if (cur_el == 3) {
        return false;
    }

    /* MDCR_EL3.SDD disables debug events from Secure state */
    if (arm_is_secure_below_el3(env)
        && extract32(env->cp15.mdcr_el3, 16, 1)) {
        return false;
    }

    /*
     * Same-EL debug exceptions need MDSCR_EL1.KDE enabled and must
     * not be masked by the D(ebug) bit in DAIF.
     */
    debug_el = arm_debug_target_el(env);

    if (cur_el == debug_el) {
        return extract32(env->cp15.mdscr_el1, 13, 1)
            && !(env->daif & PSTATE_D);
    }

    /* Otherwise the debug target needs to be a higher EL */
    return debug_el > cur_el;
}

static bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /*
             * SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /*
             * For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

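    /* Debug exceptions are never generated from AArch32 Hyp mode (EL2). */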
    return el != 2;
}

/*
 * Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *    if UsingAArch32() then
 *        return AArch32.GenerateDebugExceptions()
 *    else
 *        return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 */
bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if (is_a64(env)) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/*
 * Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
bool arm_singlestep_active(CPUARMState *env)
{
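    /* MDSCR_EL1.SS (bit 0) is the software step control bit. */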
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = arm_num_brps(cpu);
    int ctx_cmps = arm_num_ctx_cmps(cpu);
    int bt;
    uint32_t contextidr;
    uint64_t hcr_el2;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn >= brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled: generate no events */
        return false;
    }

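    /* DBGBCR<n>_EL1.BT, bits [23:20], selects the breakpoint type. */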
    bt = extract64(bcr, 20, 4);
    hcr_el2 = arm_hcr_el2_eff(env);

    switch (bt) {
    case 3: /* linked context ID match */
        switch (arm_current_el(env)) {
        default:
            /* Context matches never fire in AArch64 EL3 */
            return false;
        case 2:
            if (!(hcr_el2 & HCR_E2H)) {
                /* Context matches never fire in EL2 without E2H enabled. */
                return false;
            }
            contextidr = env->cp15.contextidr_el[2];
            break;
        case 1:
            contextidr = env->cp15.contextidr_el[1];
            break;
        case 0:
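            /*
             * With HCR_EL2.{E2H,TGE} == {1,1} EL0 runs against the
             * EL2&0 ("host") translation regime, so CONTEXTIDR_EL2 is
             * the relevant context ID register.
             */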
            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                contextidr = env->cp15.contextidr_el[2];
            } else {
                contextidr = env->cp15.contextidr_el[1];
            }
            break;
        }
        break;

    case 7:  /* linked contextidr_el1 match */
        contextidr = env->cp15.contextidr_el[1];
        break;
    case 13: /* linked contextidr_el2 match */
        contextidr = env->cp15.contextidr_el[2];
        break;

    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 15: /* linked full context ID match */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = FIELD_EX64(cr, DBGWCR, PAC);
    hmc = FIELD_EX64(cr, DBGWCR, HMC);
    ssc = FIELD_EX64(cr, DBGWCR, SSC);

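    /*
     * Under the simplification described above, SSC values 0b01 and
     * 0b11 behave as "match in Non-secure state only" and 0b10 as
     * "match in Secure state only"; 0b00 matches in both.
     */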
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

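    /*
     * WT set means this watchpoint/breakpoint is linked: it matches
     * only if the context breakpoint named by LBN also matches.
     */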
    wt = FIELD_EX64(cr, DBGWCR, WT);
    lbn = FIELD_EX64(cr, DBGWCR, LBN);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally (MDSCR_EL1.MDE, bit 15)
     * or we can't take debug exceptions here then watchpoint
     * firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong pc;
    int n;

    /*
     * If breakpoints are disabled globally (MDSCR_EL1.MDE, bit 15)
     * or we can't take debug exceptions here then breakpoint
     * firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    /*
     * Single-step exceptions have priority over breakpoint exceptions.
     * If single-step state is active-pending, suppress the bp.
     */
    if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) {
        return false;
    }

    /*
     * PC alignment faults have priority over breakpoint exceptions.
     */
    pc = is_a64(env) ? env->pc : env->regs[15];
    if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) {
        return false;
    }

    /*
     * Instruction aborts have priority over breakpoint exceptions.
     * TODO: We would need to look up the page for PC and verify that
     * it is present and executable.
     */

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

/*
 * Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

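    /*
     * The long-descriptor (LPAE) fault status format is always used
     * when the exception is taken to AArch64 or to AArch32 Hyp mode;
     * otherwise it is used only if the target EL has TTBCR.EAE set.
     */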
    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
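        /*
         * Only architectural (BP_CPU) watchpoints raise a guest
         * exception here; GDB watchpoints are left for the gdbstub
         * to report to the debugger.
         */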
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception_debug(env, EXCP_DATA_ABORT,
                                  syn_watchpoint(0, 0, wnr));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0));
    }
}

/*
 * Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
{
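    /*
     * The software-step syndrome is built by the caller; here we only
     * route it, and raise_exception_debug() fixes up the EC field when
     * the exception is taken to the current EL.
     */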
    raise_exception_debug(env, EXCP_UDEF, syndrome);
}

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
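        /*
         * The adjustment XORs the low address bits, so XOR-ing again
         * undoes it: e.g. a byte access to 0x1001 arrives here as
         * 0x1002, and 0x1002 ^ 3 == 0x1001 again.
         */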
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

#endif
557