/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "cpregs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/tcg.h"

#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return 1;
    }

    if (arm_is_el2_enabled(env)) {
        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
                       env->cp15.mdcr_el2 & MDCR_TDE;
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

/*
 * Raise an exception to the debug target el.
 * Modify syndrome to indicate when origin and target EL are the same.
 */
G_NORETURN static void
raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /*
     * If singlestep is targeting a lower EL than the current one, then
     * DisasContext.ss_active must be false and we can never get here.
     * Similarly for watchpoint and breakpoint matches.
     */
    assert(debug_el >= cur_el);
    syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT;
    raise_exception(env, excp, syndrome, debug_el);
}

/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
static bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    int debug_el;

    if (cur_el == 3) {
        return false;
    }

    /* MDCR_EL3.SDD disables debug events from Secure state */
    if (arm_is_secure_below_el3(env)
        && extract32(env->cp15.mdcr_el3, 16, 1)) {
        return false;
    }

    /*
     * Same EL to same EL debug exceptions need MDSCR_KDE enabled
     * while not masking the (D)ebug bit in DAIF.
     */
    debug_el = arm_debug_target_el(env);

    if (cur_el == debug_el) {
        return extract32(env->cp15.mdscr_el1, 13, 1)
            && !(env->daif & PSTATE_D);
    }

    /* Otherwise the debug target needs to be a higher EL */
    return debug_el > cur_el;
}

static bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /*
             * SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /*
             * For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}
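/*
 * Worked example for the SPD handling above (values are illustrative,
 * not taken from this file): with MDCR_EL3.SPD == 0b10, a Secure EL1
 * access returns false, but a Secure EL0 access with SDER.SUIDEN set
 * still returns true, because the SUIDEN check is taken before SPD is
 * consulted. With SPD == 0b00, QEMU behaves as if the external SPIDEN
 * signal were tied high, so Secure debug is permitted.
 */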
/*
 * Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *   if UsingAArch32() then
 *     return AArch32.GenerateDebugExceptions()
 *   else
 *     return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 */
bool arm_generate_debug_exceptions(CPUARMState *env)
{
    /* The OS Lock (OSLSR_EL1.OSLK) and OS Double Lock suppress debug events */
    if (extract32(env->cp15.oslsr_el1, 1, 1) || (env->cp15.osdlr_el1 & 1)) {
        return false;
    }
    if (is_a64(env)) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/*
 * Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = arm_num_brps(cpu);
    int ctx_cmps = arm_num_ctx_cmps(cpu);
    int bt;
    uint32_t contextidr;
    uint64_t hcr_el2;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn >= brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);
    hcr_el2 = arm_hcr_el2_eff(env);

    switch (bt) {
    case 3: /* linked context ID match */
        switch (arm_current_el(env)) {
        default:
            /* Context matches never fire in AArch64 EL3 */
            return false;
        case 2:
            if (!(hcr_el2 & HCR_E2H)) {
                /* Context matches never fire in EL2 without E2H enabled. */
                return false;
            }
            contextidr = env->cp15.contextidr_el[2];
            break;
        case 1:
            contextidr = env->cp15.contextidr_el[1];
            break;
        case 0:
            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                contextidr = env->cp15.contextidr_el[2];
            } else {
                contextidr = env->cp15.contextidr_el[1];
            }
            break;
        }
        break;

    case 7: /* linked contextidr_el1 match */
        contextidr = env->cp15.contextidr_el[1];
        break;
    case 13: /* linked contextidr_el2 match */
        contextidr = env->cp15.contextidr_el[2];
        break;

    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 15: /* linked full context ID match */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
}
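/*
 * Illustrative guest programming sketch (register values assumed, not
 * from this file): to make breakpoint 0 match only when
 * CONTEXTIDR_EL1 == 0x1234, a guest links it to a context-aware
 * breakpoint, say number 5:
 *   DBGBVR5_EL1 = 0x1234;          // context ID to compare
 *   DBGBCR5_EL1 = (3 << 20) | 1;   // BT=0b0011 (ctx ID match), E=1
 *   DBGBCR0_EL1 = (1 << 20) | (5 << 16) | (0xf << 5) | 1;
 *                                  // BT=0b0001 (linked addr match),
 *                                  // LBN=5, BAS=0b1111, E=1
 * When breakpoint 0's address comparison hits, linked_bp_matches(cpu, 5)
 * above decides whether the linked context comparison also passes.
 */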
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = FIELD_EX64(cr, DBGWCR, PAC);
    hmc = FIELD_EX64(cr, DBGWCR, HMC);
    ssc = FIELD_EX64(cr, DBGWCR, SSC);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = FIELD_EX64(cr, DBGWCR, WT);
    lbn = FIELD_EX64(cr, DBGWCR, LBN);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}
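/*
 * Example of the privilege checks above (illustrative values): a
 * watchpoint programmed with SSC=0b00, HMC=0 and PAC=0b01 matches only
 * EL1 accesses, since EL2/EL3 accesses fail the HMC test and EL0
 * accesses need PAC bit 1. PAC=0b11 with HMC=0 matches both EL0 and
 * EL1, which is how a guest OS would typically watch both kernel and
 * user accesses.
 */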
bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong pc;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    /*
     * Single-step exceptions have priority over breakpoint exceptions.
     * If single-step state is active-pending, suppress the bp.
     */
    if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) {
        return false;
    }

    /*
     * PC alignment faults have priority over breakpoint exceptions.
     */
    pc = is_a64(env) ? env->pc : env->regs[15];
    if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) {
        return false;
    }

    /*
     * Instruction aborts have priority over breakpoint exceptions.
     * TODO: We would need to look up the page for PC and verify that
     * it is present and executable.
     */

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

/*
 * Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae;

    if (arm_feature(env, ARM_FEATURE_M)) {
        using_lpae = false;
    } else if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
               arm_feature(env, ARM_FEATURE_V8)) {
        using_lpae = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE) &&
               (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
        using_lpae = true;
    } else {
        using_lpae = false;
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
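/*
 * Concretely (illustrative, not an exhaustive statement of the
 * architecture): for an AArch64 debug target EL this returns the
 * long-descriptor fault status code for a debug exception (0b100010),
 * while an AArch32 EL1 target using the short-descriptor format gets
 * the short-format "debug event" code (0b00010).
 */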
void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception_debug(env, EXCP_DATA_ABORT,
                                  syn_watchpoint(0, 0, wnr));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0));
    }
}

/*
 * Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
{
    raise_exception_debug(env, EXCP_UDEF, syndrome);
}
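/*
 * Note on the syndrome plumbing (a reading of raise_exception_debug()
 * above, with example EC values from the v8 ARM ARM): the translator
 * passes a syndrome whose EC is the "from lower EL" form, e.g. 0x32
 * for software step. When the debug target EL equals the current EL,
 * raise_exception_debug() sets bit 26 (ARM_EL_EC_SHIFT), turning it
 * into the same-EL form 0x33; breakpoint (0x30 -> 0x31) and watchpoint
 * (0x34 -> 0x35) ECs follow the same pattern.
 */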
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!FIELD_EX64(wcr, DBGWCR, E)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /*
     * Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = FIELD_EX64(wcr, DBGWCR, MASK);
    if (mask == 1 || mask == 2) {
        /*
         * Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /*
         * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = FIELD_EX64(wcr, DBGWCR, BAS);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /*
             * Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /*
         * The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
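/*
 * Worked example for hw_watchpoint_update() (illustrative values):
 * with WVR = 0x1000, BAS = 0b00111100 and MASK = 0, basstart is
 * ctz32(0x3c) = 2 and len is cto32(0xf) = 4, so the QEMU watchpoint
 * covers 0x1002..0x1005. With MASK = 12 instead, BAS is ignored and
 * the watchpoint covers the aligned 4KB region 0x1000..0x1fff
 * (len = 1 << 12).
 */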
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written. It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /*
         * We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

/*
 * Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
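/*
 * Example of the routing above (illustrative): an EL1 write to
 * OSLAR_EL1 with MDCR_EL2.TDE set traps to EL2 even though
 * MDCR_EL2.TDOSA is clear, because TDE (like HCR_EL2.TGE) implies the
 * OS-lock trap; with both clear but MDCR_EL3.TDOSA set, the same
 * access traps to EL3 instead.
 */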
/*
 * Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_dbgvcr32(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* MDCR_EL3.TDA doesn't apply for FEAT_NV traps */
    if (arm_current_el(env) == 2 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to Debug Comms Channel registers. If FEAT_FGT
 * is implemented then these are controlled by MDCR_EL2.TDCC for
 * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by
 * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA.
 * For EL0, they are also controlled by MDSCR_EL1.TDCC.
 */
static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdscr_el1_tdcc = extract32(env->cp15.mdscr_el1, 12, 1);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);
    bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (mdcr_el2 & MDCR_TDCC);
    bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (env->cp15.mdcr_el3 & MDCR_TDCC);

    if (el < 1 && mdscr_el1_tdcc) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /*
     * Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
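/*
 * Usage example (illustrative): a 32-bit guest locks the OS lock by
 * writing the architected key value 0xC5ACCE55 to the AArch32 OSLAR
 * and unlocks it by writing any other value; an AArch64 guest instead
 * writes OSLAR_EL1 bit 0. Either way only OSLSR_EL1.OSLK (bit 1)
 * changes, and while it is set arm_generate_debug_exceptions()
 * suppresses debug events.
 */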
static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /*
     * Only defined bit is bit 0 (DLK); if Feat_DoubleLock is not
     * implemented this is RAZ/WI.
     */
    if (arm_feature(env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_doublelock, cpu)
        : cpu_isar_feature(aa32_doublelock, cpu)) {
        env->cp15.osdlr_el1 = value & 1;
    }
}

static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim |= (value & 0xFF);
}

static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CLAIM bits are RAO */
    return 0xFF;
}

static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim &= ~(value & 0xFF);
}
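/*
 * Illustrative guest sequence (values assumed): writing 0x03 to
 * DBGCLAIMSET_EL1 sets CLAIM tags 0 and 1; reading DBGCLAIMCLR_EL1
 * then returns the stored 0x03, and writing 0x01 to DBGCLAIMCLR_EL1
 * clears tag 0, leaving 0x02. Reads of DBGCLAIMSET_EL1 are RAO, as
 * implemented in dbgclaimset_read() above.
 */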
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /*
     * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_MDSCR_EL1,
      .nv2_redirect_offset = 0x158,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external
     * Debug Communication Channel is not implemented.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * These registers belong to the Debug Communications Channel,
     * which is not implemented. However we implement RAZ/WI behaviour
     * with trapping to prevent spurious SIGILLs if the guest OS does
     * access them, as the support cannot be probed for.
     */
    { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */
    { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0,
      .access = PL0_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * OSECCR_EL1 provides a mechanism for an operating system
     * to access the contents of EDECCR. EDECCR is not implemented though,
     * and neither is the rest of the external debug interface.
     */
    { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_OSECCR_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLAR_EL1,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLSR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .fgt = FGT_OSDLR_EL1,
      .writefn = osdlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) },
    /*
     * Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /*
     * Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGCLAIM registers.
     * "The architecture does not define any functionality for the CLAIM
     * tag bits.", so we only keep the raw bits.
     */
    { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimset_write, .readfn = dbgclaimset_read },
    { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimclr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) },
};
1051 * "The architecture does not define any functionality for the CLAIM tag bits.", 1052 * so we only keep the raw bits 1053 */ 1054 { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH, 1055 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6, 1056 .type = ARM_CP_ALIAS, 1057 .access = PL1_RW, .accessfn = access_tda, 1058 .fgt = FGT_DBGCLAIM, 1059 .writefn = dbgclaimset_write, .readfn = dbgclaimset_read }, 1060 { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH, 1061 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6, 1062 .access = PL1_RW, .accessfn = access_tda, 1063 .fgt = FGT_DBGCLAIM, 1064 .writefn = dbgclaimclr_write, .raw_writefn = raw_write, 1065 .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) }, 1066 }; 1067 1068 /* These are present only when EL1 supports AArch32 */ 1069 static const ARMCPRegInfo debug_aa32_el1_reginfo[] = { 1070 /* 1071 * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 1072 * to save and restore a 32-bit guest's DBGVCR) 1073 */ 1074 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 1075 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 1076 .access = PL2_RW, .accessfn = access_dbgvcr32, 1077 .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, 1078 }; 1079 1080 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 1081 /* 64 bit access versions of the (dummy) debug registers */ 1082 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 1083 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT | ARM_CP_NO_GDB, 1084 .resetvalue = 0 }, 1085 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 1086 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT | ARM_CP_NO_GDB, 1087 .resetvalue = 0 }, 1088 }; 1089 1090 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1091 uint64_t value) 1092 { 1093 ARMCPU *cpu = env_archcpu(env); 1094 int i = ri->crm; 1095 1096 /* 1097 * Bits [1:0] are RES0. 1098 * 1099 * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA) 1100 * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if 1101 * they contain the value written. It is CONSTRAINED UNPREDICTABLE 1102 * whether the RESS bits are ignored when comparing an address. 1103 * 1104 * Therefore we are allowed to compare the entire register, which lets 1105 * us avoid considering whether or not FEAT_LVA is actually enabled. 1106 */ 1107 value &= ~3ULL; 1108 1109 raw_write(env, ri, value); 1110 if (tcg_enabled()) { 1111 hw_watchpoint_update(cpu, i); 1112 } 1113 } 1114 1115 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1116 uint64_t value) 1117 { 1118 ARMCPU *cpu = env_archcpu(env); 1119 int i = ri->crm; 1120 1121 raw_write(env, ri, value); 1122 if (tcg_enabled()) { 1123 hw_watchpoint_update(cpu, i); 1124 } 1125 } 1126 1127 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1128 uint64_t value) 1129 { 1130 ARMCPU *cpu = env_archcpu(env); 1131 int i = ri->crm; 1132 1133 raw_write(env, ri, value); 1134 if (tcg_enabled()) { 1135 hw_breakpoint_update(cpu, i); 1136 } 1137 } 1138 1139 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1140 uint64_t value) 1141 { 1142 ARMCPU *cpu = env_archcpu(env); 1143 int i = ri->crm; 1144 1145 /* 1146 * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 1147 * copy of BAS[0]. 
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /*
     * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    if (tcg_enabled()) {
        hw_breakpoint_update(cpu, i);
    }
}

void define_debug_regs(ARMCPU *cpu)
{
    /*
     * Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32. Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    /*
     * DBGDEVID is present in the v7 debug architecture if
     * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is
     * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist
     * from v7.1 of the debug architecture. Because no fields have yet
     * been defined in DBGDEVID2 (and quite possibly none will ever
     * be) we don't define an ARMISARegisters field for it.
     * These registers exist only if EL1 can use AArch32, but that
     * happens naturally because they are only PL1 accessible anyway.
     */
    if (extract32(cpu->isar.dbgdidr, 15, 1)) {
        ARMCPRegInfo dbgdevid = {
            .name = "DBGDEVID",
            .cp = 14, .opc1 = 0, .crn = 7, .crm = 2, .opc2 = 7,
            .access = PL1_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid,
        };
        define_one_arm_cp_reg(cpu, &dbgdevid);
    }
    if (cpu_isar_feature(aa32_debugv7p1, cpu)) {
        ARMCPRegInfo dbgdevid12[] = {
            {
                .name = "DBGDEVID1",
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1,
            }, {
                .name = "DBGDEVID2",
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 0, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = 0,
            },
        };
        define_arm_cp_regs(cpu, dbgdevid12);
    }

    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);
    if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
        define_arm_cp_regs(cpu, debug_aa32_el1_reginfo);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
        char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgbvr_el1_name);
        g_free(dbgbcr_el1_name);
    }
    for (i = 0; i < wrps; i++) {
        char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
        char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgwvr_el1_name);
        g_free(dbgwcr_el1_name);
    }
}