/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /*
     * Links to unimplemented or non-context-aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
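     * (For example, with DBGDIDR.BRPs = 5, i.e. six breakpoints, and
     * CTX_CMPs = 1, i.e. the two highest-numbered breakpoints are
     * context-aware, only lbn values 4 and 5 pass the check below.)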
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled: generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
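     * (In that format, PROCID occupies bits [31:8] and the ASID bits [7:0].)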
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
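         * (BT encodings with bit 0 clear are the unlinked variants.)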
         */
        return false;
    }

    return false;
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

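    /*
     * As implemented below: SSC selects the Security states in which the
     * debug point matches (0b00 both, 0b01/0b11 Non-secure only, 0b10
     * Secure only); HMC enables matching at EL2/EL3; PAC bit 0 enables
     * matching at EL1 and bit 1 at EL0.
     */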
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
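     * (Bit 15 of MDSCR_EL1 is MDE, the monitor-debug-events enable.)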
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

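    /*
     * The internal EXCP_DEBUG raised here is picked up by
     * arm_debug_excp_handler() below and converted into an
     * architectural breakpoint exception.
     */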
    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
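     * (Returning false tells the core code to ignore this hit.)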
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
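     * (For example, a one-byte access to 0x1003 will have been adjusted
     * to 0x1000 on the way here; XOR-ing with 3 restores 0x1003.)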
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

#endif