/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "cpregs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/tcg.h"

#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    /* HCR_EL2.TGE or MDCR_EL2.TDE route debug exceptions to EL2. */
    if (arm_is_el2_enabled(env)) {
        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
                       env->cp15.mdcr_el2 & MDCR_TDE;
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        /* Secure state with an AArch32 EL3: debug exceptions go to EL3. */
        return 3;
    } else {
        return 1;
    }
}

/*
 * Raise an exception to the debug target el.
 * Modify syndrome to indicate when origin and target EL are the same.
 */
G_NORETURN static void
raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /*
     * If singlestep is targeting a lower EL than the current one, then
     * DisasContext.ss_active must be false and we can never get here.
     * Similarly for watchpoint and breakpoint matches.
     */
    assert(debug_el >= cur_el);
    /*
     * Set the low bit of the EC field when the exception is taken from
     * the same EL as it targets (the "same EL" EC values are the
     * "lower EL" values plus one).
     */
    syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT;
    raise_exception(env, excp, syndrome, debug_el);
}

/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
static bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    int debug_el;

    /* Debug exceptions are never generated from EL3. */
    if (cur_el == 3) {
        return false;
    }

    /* MDCR_EL3.SDD disables debug events from Secure state */
    if (arm_is_secure_below_el3(env)
        && extract32(env->cp15.mdcr_el3, 16, 1)) {
        return false;
    }

    /*
     * Same EL to same EL debug exceptions need MDSCR_KDE enabled
     * while not masking the (D)ebug bit in DAIF.
     */
    debug_el = arm_debug_target_el(env);

    if (cur_el == debug_el) {
        return extract32(env->cp15.mdscr_el1, 13, 1)
            && !(env->daif & PSTATE_D);
    }

    /* Otherwise the debug target needs to be a higher EL */
    return debug_el > cur_el;
}

static bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    /* AArch32 EL0 under an AArch64 EL1 follows the AArch64 rules. */
    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /*
             * SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /*
             * For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
120 */ 121 return true; 122 case 2: 123 return false; 124 case 3: 125 return true; 126 } 127 } 128 129 return el != 2; 130 } 131 132 /* 133 * Return true if debugging exceptions are currently enabled. 134 * This corresponds to what in ARM ARM pseudocode would be 135 * if UsingAArch32() then 136 * return AArch32.GenerateDebugExceptions() 137 * else 138 * return AArch64.GenerateDebugExceptions() 139 * We choose to push the if() down into this function for clarity, 140 * since the pseudocode has it at all callsites except for the one in 141 * CheckSoftwareStep(), where it is elided because both branches would 142 * always return the same value. 143 */ 144 bool arm_generate_debug_exceptions(CPUARMState *env) 145 { 146 if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) { 147 return false; 148 } 149 if (is_a64(env)) { 150 return aa64_generate_debug_exceptions(env); 151 } else { 152 return aa32_generate_debug_exceptions(env); 153 } 154 } 155 156 /* 157 * Is single-stepping active? (Note that the "is EL_D AArch64?" check 158 * implicitly means this always returns false in pre-v8 CPUs.) 159 */ 160 bool arm_singlestep_active(CPUARMState *env) 161 { 162 return extract32(env->cp15.mdscr_el1, 0, 1) 163 && arm_el_is_aa64(env, arm_debug_target_el(env)) 164 && arm_generate_debug_exceptions(env); 165 } 166 167 /* Return true if the linked breakpoint entry lbn passes its checks */ 168 static bool linked_bp_matches(ARMCPU *cpu, int lbn) 169 { 170 CPUARMState *env = &cpu->env; 171 uint64_t bcr = env->cp15.dbgbcr[lbn]; 172 int brps = arm_num_brps(cpu); 173 int ctx_cmps = arm_num_ctx_cmps(cpu); 174 int bt; 175 uint32_t contextidr; 176 uint64_t hcr_el2; 177 178 /* 179 * Links to unimplemented or non-context aware breakpoints are 180 * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or 181 * as if linked to an UNKNOWN context-aware breakpoint (in which 182 * case DBGWCR<n>_EL1.LBN must indicate that breakpoint). 183 * We choose the former. 
184 */ 185 if (lbn >= brps || lbn < (brps - ctx_cmps)) { 186 return false; 187 } 188 189 bcr = env->cp15.dbgbcr[lbn]; 190 191 if (extract64(bcr, 0, 1) == 0) { 192 /* Linked breakpoint disabled : generate no events */ 193 return false; 194 } 195 196 bt = extract64(bcr, 20, 4); 197 hcr_el2 = arm_hcr_el2_eff(env); 198 199 switch (bt) { 200 case 3: /* linked context ID match */ 201 switch (arm_current_el(env)) { 202 default: 203 /* Context matches never fire in AArch64 EL3 */ 204 return false; 205 case 2: 206 if (!(hcr_el2 & HCR_E2H)) { 207 /* Context matches never fire in EL2 without E2H enabled. */ 208 return false; 209 } 210 contextidr = env->cp15.contextidr_el[2]; 211 break; 212 case 1: 213 contextidr = env->cp15.contextidr_el[1]; 214 break; 215 case 0: 216 if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 217 contextidr = env->cp15.contextidr_el[2]; 218 } else { 219 contextidr = env->cp15.contextidr_el[1]; 220 } 221 break; 222 } 223 break; 224 225 case 7: /* linked contextidr_el1 match */ 226 contextidr = env->cp15.contextidr_el[1]; 227 break; 228 case 13: /* linked contextidr_el2 match */ 229 contextidr = env->cp15.contextidr_el[2]; 230 break; 231 232 case 9: /* linked VMID match (reserved if no EL2) */ 233 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 234 case 15: /* linked full context ID match */ 235 default: 236 /* 237 * Links to Unlinked context breakpoints must generate no 238 * events; we choose to do the same for reserved values too. 239 */ 240 return false; 241 } 242 243 /* 244 * We match the whole register even if this is AArch32 using the 245 * short descriptor format (in which case it holds both PROCID and ASID), 246 * since we don't implement the optional v7 context ID masking. 
247 */ 248 return contextidr == (uint32_t)env->cp15.dbgbvr[lbn]; 249 } 250 251 static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) 252 { 253 CPUARMState *env = &cpu->env; 254 uint64_t cr; 255 int pac, hmc, ssc, wt, lbn; 256 /* 257 * Note that for watchpoints the check is against the CPU security 258 * state, not the S/NS attribute on the offending data access. 259 */ 260 bool is_secure = arm_is_secure(env); 261 int access_el = arm_current_el(env); 262 263 if (is_wp) { 264 CPUWatchpoint *wp = env->cpu_watchpoint[n]; 265 266 if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) { 267 return false; 268 } 269 cr = env->cp15.dbgwcr[n]; 270 if (wp->hitattrs.user) { 271 /* 272 * The LDRT/STRT/LDT/STT "unprivileged access" instructions should 273 * match watchpoints as if they were accesses done at EL0, even if 274 * the CPU is at EL1 or higher. 275 */ 276 access_el = 0; 277 } 278 } else { 279 uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; 280 281 if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) { 282 return false; 283 } 284 cr = env->cp15.dbgbcr[n]; 285 } 286 /* 287 * The WATCHPOINT_HIT flag guarantees us that the watchpoint is 288 * enabled and that the address and access type match; for breakpoints 289 * we know the address matched; check the remaining fields, including 290 * linked breakpoints. We rely on WCR and BCR having the same layout 291 * for the LBN, SSC, HMC, PAC/PMC and is-linked fields. 292 * Note that some combinations of {PAC, HMC, SSC} are reserved and 293 * must act either like some valid combination or as if the watchpoint 294 * were disabled. We choose the former, and use this together with 295 * the fact that EL3 must always be Secure and EL2 must always be 296 * Non-Secure to simplify the code slightly compared to the full 297 * table in the ARM ARM. 
     */
    pac = FIELD_EX64(cr, DBGWCR, PAC);
    hmc = FIELD_EX64(cr, DBGWCR, HMC);
    ssc = FIELD_EX64(cr, DBGWCR, SSC);

    /* SSC selects which Security state(s) the bp/wp may fire in. */
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* HMC and the two PAC bits select which ELs the bp/wp may fire at. */
    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = FIELD_EX64(cr, DBGWCR, WT);
    lbn = FIELD_EX64(cr, DBGWCR, LBN);

    /* A linked bp/wp only fires if the breakpoint it links to matches. */
    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

/* Return true if any enabled architectural watchpoint has matched. */
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong pc;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    /*
     * Single-step exceptions have priority over breakpoint exceptions.
     * If single-step state is active-pending, suppress the bp.
     */
    if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) {
        return false;
    }

    /*
     * PC alignment faults have priority over breakpoint exceptions.
     * (A 4-byte-aligned PC is only required outside Thumb state.)
     */
    pc = is_a64(env) ? env->pc : env->regs[15];
    if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) {
        return false;
    }

    /*
     * Instruction aborts have priority over breakpoint exceptions.
     * TODO: We would need to look up the page for PC and verify that
     * it is present and executable.
     */

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

/*
 * Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    /* Pick long vs short FSR format to match the target translation regime. */
    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
               arm_feature(env, ARM_FEATURE_V8)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            /* wnr: true if the access that hit was a write */
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception_debug(env, EXCP_DATA_ABORT,
                                  syn_watchpoint(0, 0, wnr));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0));
    }
}

/*
 * Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

/* Raise the software-step debug exception (syndrome supplied by translator). */
void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
{
    raise_exception_debug(env, EXCP_UDEF, syndrome);
}

/*
 * Re-synchronize QEMU's watchpoint n with the architectural state in
 * DBGWVR<n>/DBGWCR<n>: remove any stale QEMU watchpoint and insert a
 * new one if the architectural watchpoint is enabled.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!FIELD_EX64(wcr, DBGWCR, E)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    /* LSC selects which access types (load/store) the watchpoint matches. */
    switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /*
     * Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = FIELD_EX64(wcr, DBGWCR, MASK);
    if (mask == 1 || mask == 2) {
        /*
         * Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /*
         * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = FIELD_EX64(wcr, DBGWCR, BAS);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /*
             * Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /*
         * The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

/*
 * Re-synchronize QEMU's breakpoint n with the architectural state in
 * DBGBVR<n>/DBGBCR<n>: remove any stale QEMU breakpoint and insert a
 * new one if the architectural breakpoint is enabled.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written. It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000 => no breakpoint
         *  0b0011 => breakpoint on addr
         *  0b1100 => breakpoint on addr + 2
         *  0b1111 => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /*
         * We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

/*
 * Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
801 */ 802 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 803 bool isread) 804 { 805 int el = arm_current_el(env); 806 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 807 bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || 808 (arm_hcr_el2_eff(env) & HCR_TGE); 809 810 if (el < 2 && mdcr_el2_tdra) { 811 return CP_ACCESS_TRAP_EL2; 812 } 813 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 814 return CP_ACCESS_TRAP_EL3; 815 } 816 return CP_ACCESS_OK; 817 } 818 819 /* 820 * Check for traps to general debug registers, which are controlled 821 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 822 */ 823 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 824 bool isread) 825 { 826 int el = arm_current_el(env); 827 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 828 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || 829 (arm_hcr_el2_eff(env) & HCR_TGE); 830 831 if (el < 2 && mdcr_el2_tda) { 832 return CP_ACCESS_TRAP_EL2; 833 } 834 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 835 return CP_ACCESS_TRAP_EL3; 836 } 837 return CP_ACCESS_OK; 838 } 839 840 /* 841 * Check for traps to Debug Comms Channel registers. If FEAT_FGT 842 * is implemented then these are controlled by MDCR_EL2.TDCC for 843 * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by 844 * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA. 
845 */ 846 static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri, 847 bool isread) 848 { 849 int el = arm_current_el(env); 850 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 851 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || 852 (arm_hcr_el2_eff(env) & HCR_TGE); 853 bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) && 854 (mdcr_el2 & MDCR_TDCC); 855 bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) && 856 (env->cp15.mdcr_el3 & MDCR_TDCC); 857 858 if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) { 859 return CP_ACCESS_TRAP_EL2; 860 } 861 if (el < 3 && ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) { 862 return CP_ACCESS_TRAP_EL3; 863 } 864 return CP_ACCESS_OK; 865 } 866 867 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 868 uint64_t value) 869 { 870 /* 871 * Writes to OSLAR_EL1 may update the OS lock status, which can be 872 * read via a bit in OSLSR_EL1. 873 */ 874 int oslock; 875 876 if (ri->state == ARM_CP_STATE_AA32) { 877 oslock = (value == 0xC5ACCE55); 878 } else { 879 oslock = value & 1; 880 } 881 882 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 883 } 884 885 static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 886 uint64_t value) 887 { 888 ARMCPU *cpu = env_archcpu(env); 889 /* 890 * Only defined bit is bit 0 (DLK); if Feat_DoubleLock is not 891 * implemented this is RAZ/WI. 892 */ 893 if(arm_feature(env, ARM_FEATURE_AARCH64) 894 ? 
cpu_isar_feature(aa64_doublelock, cpu) 895 : cpu_isar_feature(aa32_doublelock, cpu)) { 896 env->cp15.osdlr_el1 = value & 1; 897 } 898 } 899 900 static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri, 901 uint64_t value) 902 { 903 env->cp15.dbgclaim |= (value & 0xFF); 904 } 905 906 static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri) 907 { 908 /* CLAIM bits are RAO */ 909 return 0xFF; 910 } 911 912 static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 913 uint64_t value) 914 { 915 env->cp15.dbgclaim &= ~(value & 0xFF); 916 } 917 918 static const ARMCPRegInfo debug_cp_reginfo[] = { 919 /* 920 * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 921 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 922 * unlike DBGDRAR it is never accessible from EL0. 923 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 924 * accessor. 925 */ 926 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 927 .access = PL0_R, .accessfn = access_tdra, 928 .type = ARM_CP_CONST, .resetvalue = 0 }, 929 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 930 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 931 .access = PL1_R, .accessfn = access_tdra, 932 .type = ARM_CP_CONST, .resetvalue = 0 }, 933 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 934 .access = PL0_R, .accessfn = access_tdra, 935 .type = ARM_CP_CONST, .resetvalue = 0 }, 936 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 937 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 938 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 939 .access = PL1_RW, .accessfn = access_tda, 940 .fgt = FGT_MDSCR_EL1, 941 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 942 .resetvalue = 0 }, 943 /* 944 * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external 945 * Debug Communication Channel is not implemented. 
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * OSDTRRX_EL1/OSDTRTX_EL1 are used for save and restore of DBGDTRRX_EL0.
     * It is a component of the Debug Communications Channel, which is not implemented.
     */
    { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * OSECCR_EL1 provides a mechanism for an operating system
     * to access the contents of EDECCR. EDECCR is not implemented though,
     * as is the rest of external device mechanism.
     */
    { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_OSECCR_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    /* OS Lock Access Register: write-only, updates OSLSR via oslar_write. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLAR_EL1,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLSR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .fgt = FGT_OSDLR_EL1,
      .writefn = osdlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) },
    /*
     * Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
    /*
     * Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGCLAIM registers.
     * "The architecture does not define any functionality for the CLAIM tag bits.",
     * so we only keep the raw bits
     */
    { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimset_write, .readfn = dbgclaimset_read },
    { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimclr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) },
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
};

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* The watchpoint index is encoded in CRm of the register encoding. */
    int i = ri->crm;

    /*
     * Bits [1:0] are RES0.
     *
     * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
     * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
     * they contain the value written. It is CONSTRAINED UNPREDICTABLE
     * whether the RESS bits are ignored when comparing an address.
1067 * 1068 * Therefore we are allowed to compare the entire register, which lets 1069 * us avoid considering whether or not FEAT_LVA is actually enabled. 1070 */ 1071 value &= ~3ULL; 1072 1073 raw_write(env, ri, value); 1074 if (tcg_enabled()) { 1075 hw_watchpoint_update(cpu, i); 1076 } 1077 } 1078 1079 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1080 uint64_t value) 1081 { 1082 ARMCPU *cpu = env_archcpu(env); 1083 int i = ri->crm; 1084 1085 raw_write(env, ri, value); 1086 if (tcg_enabled()) { 1087 hw_watchpoint_update(cpu, i); 1088 } 1089 } 1090 1091 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1092 uint64_t value) 1093 { 1094 ARMCPU *cpu = env_archcpu(env); 1095 int i = ri->crm; 1096 1097 raw_write(env, ri, value); 1098 if (tcg_enabled()) { 1099 hw_breakpoint_update(cpu, i); 1100 } 1101 } 1102 1103 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1104 uint64_t value) 1105 { 1106 ARMCPU *cpu = env_archcpu(env); 1107 int i = ri->crm; 1108 1109 /* 1110 * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 1111 * copy of BAS[0]. 1112 */ 1113 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 1114 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 1115 1116 raw_write(env, ri, value); 1117 if (tcg_enabled()) { 1118 hw_breakpoint_update(cpu, i); 1119 } 1120 } 1121 1122 void define_debug_regs(ARMCPU *cpu) 1123 { 1124 /* 1125 * Define v7 and v8 architectural debug registers. 1126 * These are just dummy implementations for now. 1127 */ 1128 int i; 1129 int wrps, brps, ctx_cmps; 1130 1131 /* 1132 * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot 1133 * use AArch32. Given that bit 15 is RES1, if the value is 0 then 1134 * the register must not exist for this cpu. 
1135 */ 1136 if (cpu->isar.dbgdidr != 0) { 1137 ARMCPRegInfo dbgdidr = { 1138 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, 1139 .opc1 = 0, .opc2 = 0, 1140 .access = PL0_R, .accessfn = access_tda, 1141 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 1142 }; 1143 define_one_arm_cp_reg(cpu, &dbgdidr); 1144 } 1145 1146 /* 1147 * DBGDEVID is present in the v7 debug architecture if 1148 * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is 1149 * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist 1150 * from v7.1 of the debug architecture. Because no fields have yet 1151 * been defined in DBGDEVID2 (and quite possibly none will ever 1152 * be) we don't define an ARMISARegisters field for it. 1153 * These registers exist only if EL1 can use AArch32, but that 1154 * happens naturally because they are only PL1 accessible anyway. 1155 */ 1156 if (extract32(cpu->isar.dbgdidr, 15, 1)) { 1157 ARMCPRegInfo dbgdevid = { 1158 .name = "DBGDEVID", 1159 .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 2, .crn = 7, 1160 .access = PL1_R, .accessfn = access_tda, 1161 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid, 1162 }; 1163 define_one_arm_cp_reg(cpu, &dbgdevid); 1164 } 1165 if (cpu_isar_feature(aa32_debugv7p1, cpu)) { 1166 ARMCPRegInfo dbgdevid12[] = { 1167 { 1168 .name = "DBGDEVID1", 1169 .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 1, .crn = 7, 1170 .access = PL1_R, .accessfn = access_tda, 1171 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1, 1172 }, { 1173 .name = "DBGDEVID2", 1174 .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 0, .crn = 7, 1175 .access = PL1_R, .accessfn = access_tda, 1176 .type = ARM_CP_CONST, .resetvalue = 0, 1177 }, 1178 }; 1179 define_arm_cp_regs(cpu, dbgdevid12); 1180 } 1181 1182 brps = arm_num_brps(cpu); 1183 wrps = arm_num_wrps(cpu); 1184 ctx_cmps = arm_num_ctx_cmps(cpu); 1185 1186 assert(ctx_cmps <= brps); 1187 1188 define_arm_cp_regs(cpu, debug_cp_reginfo); 1189 1190 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 1191 
define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 1192 } 1193 1194 for (i = 0; i < brps; i++) { 1195 char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i); 1196 char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i); 1197 ARMCPRegInfo dbgregs[] = { 1198 { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH, 1199 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 1200 .access = PL1_RW, .accessfn = access_tda, 1201 .fgt = FGT_DBGBVRN_EL1, 1202 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 1203 .writefn = dbgbvr_write, .raw_writefn = raw_write 1204 }, 1205 { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH, 1206 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 1207 .access = PL1_RW, .accessfn = access_tda, 1208 .fgt = FGT_DBGBCRN_EL1, 1209 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 1210 .writefn = dbgbcr_write, .raw_writefn = raw_write 1211 }, 1212 }; 1213 define_arm_cp_regs(cpu, dbgregs); 1214 g_free(dbgbvr_el1_name); 1215 g_free(dbgbcr_el1_name); 1216 } 1217 1218 for (i = 0; i < wrps; i++) { 1219 char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i); 1220 char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i); 1221 ARMCPRegInfo dbgregs[] = { 1222 { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH, 1223 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 1224 .access = PL1_RW, .accessfn = access_tda, 1225 .fgt = FGT_DBGWVRN_EL1, 1226 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 1227 .writefn = dbgwvr_write, .raw_writefn = raw_write 1228 }, 1229 { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH, 1230 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 1231 .access = PL1_RW, .accessfn = access_tda, 1232 .fgt = FGT_DBGWCRN_EL1, 1233 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 1234 .writefn = dbgwcr_write, .raw_writefn = raw_write 1235 }, 1236 }; 1237 define_arm_cp_regs(cpu, dbgregs); 1238 g_free(dbgwvr_el1_name); 1239 g_free(dbgwcr_el1_name); 1240 } 1241 } 1242