/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = arm_num_brps(cpu);
    int ctx_cmps = arm_num_ctx_cmps(cpu);
    int bt;
    uint32_t contextidr;
    uint64_t hcr_el2;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn >= brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);
    hcr_el2 = arm_hcr_el2_eff(env);

    switch (bt) {
    case 3: /* linked context ID match */
        switch (arm_current_el(env)) {
        default:
            /* Context matches never fire in AArch64 EL3 */
            return false;
        case 2:
            if (!(hcr_el2 & HCR_E2H)) {
                /* Context matches never fire in EL2 without E2H enabled. */
                return false;
            }
            contextidr = env->cp15.contextidr_el[2];
            break;
        case 1:
            contextidr = env->cp15.contextidr_el[1];
            break;
        case 0:
            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                contextidr = env->cp15.contextidr_el[2];
            } else {
                contextidr = env->cp15.contextidr_el[1];
            }
            break;
        }
        break;

    case 7:  /* linked contextidr_el1 match */
        contextidr = env->cp15.contextidr_el[1];
        break;
    case 13: /* linked contextidr_el2 match */
        contextidr = env->cp15.contextidr_el[2];
        break;

    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 15: /* linked full context ID match */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

#endif
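
/*
 * Illustrative sketch only, compiled out by default: a standalone
 * demonstration of the DBGBCR/DBGWCR field layout that bp_wp_matches()
 * above relies on being shared between the two registers -- E at bit 0,
 * PAC/PMC at [2:1], HMC at bit 13, SSC at [15:14], LBN at [19:16] and
 * WT/BT starting at bit 20.  The guard macro and the register value are
 * made up for the example; demo_extract64() mirrors QEMU's extract64()
 * so the snippet stays self-contained.
 */
#ifdef DEBUG_HELPER_BITFIELD_DEMO
#include <inttypes.h>
#include <stdio.h>

static uint64_t demo_extract64(uint64_t value, int start, int length)
{
    /* Pull 'length' bits out of 'value', starting at bit 'start'. */
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    /* Hypothetical DBGWCR: enabled, EL1-only access, linked to BP 2. */
    uint64_t wcr = (1ULL << 0)    /* E: watchpoint enabled */
                 | (1ULL << 1)    /* PAC = 0b01: match privileged (EL1) accesses */
                 | (2ULL << 16)   /* LBN: linked breakpoint number 2 */
                 | (1ULL << 20);  /* WT: linked watchpoint */

    printf("E=%" PRIu64 " PAC=%" PRIu64 " HMC=%" PRIu64
           " SSC=%" PRIu64 " LBN=%" PRIu64 " WT=%" PRIu64 "\n",
           demo_extract64(wcr, 0, 1),   /* 1 */
           demo_extract64(wcr, 1, 2),   /* 1 */
           demo_extract64(wcr, 13, 1),  /* 0 */
           demo_extract64(wcr, 14, 2),  /* 0 */
           demo_extract64(wcr, 16, 4),  /* 2 */
           demo_extract64(wcr, 20, 1)); /* 1 */
    return 0;
}
#endif /* DEBUG_HELPER_BITFIELD_DEMO */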