/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "cpregs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/tcg.h"

#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return 1;
    }

    if (arm_is_el2_enabled(env)) {
        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
                       env->cp15.mdcr_el2 & MDCR_TDE;
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

/*
 * Raise an exception to the debug target el.
 * Modify syndrome to indicate when origin and target EL are the same.
 */
G_NORETURN static void
raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /*
     * If singlestep is targeting a lower EL than the current one, then
     * DisasContext.ss_active must be false and we can never get here.
     * Similarly for watchpoint and breakpoint matches.
     */
    assert(debug_el >= cur_el);
    syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT;
    raise_exception(env, excp, syndrome, debug_el);
}

/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
static bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    int debug_el;

    if (cur_el == 3) {
        return false;
    }

    /* MDCR_EL3.SDD disables debug events from Secure state */
    if (arm_is_secure_below_el3(env)
        && extract32(env->cp15.mdcr_el3, 16, 1)) {
        return false;
    }

    /*
     * Same EL to same EL debug exceptions need MDSCR_KDE enabled
     * while not masking the (D)ebug bit in DAIF.
     */
    debug_el = arm_debug_target_el(env);

    if (cur_el == debug_el) {
        return extract32(env->cp15.mdscr_el1, 13, 1)
            && !(env->daif & PSTATE_D);
    }

    /* Otherwise the debug target needs to be a higher EL */
    return debug_el > cur_el;
}
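
/* See AArch32.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */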
static bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /*
             * SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /*
             * For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}

/*
 * Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *    if UsingAArch32() then
 *        return AArch32.GenerateDebugExceptions()
 *    else
 *        return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 */
bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) {
        return false;
    }
    if (is_a64(env)) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/*
 * Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = arm_num_brps(cpu);
    int ctx_cmps = arm_num_ctx_cmps(cpu);
    int bt;
    uint32_t contextidr;
    uint64_t hcr_el2;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn >= brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);
    hcr_el2 = arm_hcr_el2_eff(env);

    switch (bt) {
    case 3: /* linked context ID match */
        switch (arm_current_el(env)) {
        default:
            /* Context matches never fire in AArch64 EL3 */
            return false;
        case 2:
            if (!(hcr_el2 & HCR_E2H)) {
                /* Context matches never fire in EL2 without E2H enabled. */
                return false;
            }
            contextidr = env->cp15.contextidr_el[2];
            break;
        case 1:
            contextidr = env->cp15.contextidr_el[1];
            break;
        case 0:
            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                contextidr = env->cp15.contextidr_el[2];
            } else {
                contextidr = env->cp15.contextidr_el[1];
            }
            break;
        }
        break;

    case 7: /* linked contextidr_el1 match */
        contextidr = env->cp15.contextidr_el[1];
        break;
    case 13: /* linked contextidr_el2 match */
        contextidr = env->cp15.contextidr_el[2];
        break;

    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 15: /* linked full context ID match */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
}
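
/*
 * Return true if breakpoint/watchpoint n matches architecturally:
 * the address match has already been established by the caller, so
 * here we check the security-state (SSC), privilege (HMC, PAC) and
 * linked breakpoint (WT, LBN) fields of the control register.
 * is_wp selects between DBGWCR<n> and DBGBCR<n>.
 */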
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = FIELD_EX64(cr, DBGWCR, PAC);
    hmc = FIELD_EX64(cr, DBGWCR, HMC);
    ssc = FIELD_EX64(cr, DBGWCR, SSC);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = FIELD_EX64(cr, DBGWCR, WT);
    lbn = FIELD_EX64(cr, DBGWCR, LBN);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}
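
/*
 * Called by core code when a CPU breakpoint at the current PC fires;
 * check whether it should be reported to the guest as an architectural
 * breakpoint exception (debug exceptions may be disabled here, or masked
 * by higher-priority pending exceptions).
 */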
bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong pc;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    /*
     * Single-step exceptions have priority over breakpoint exceptions.
     * If single-step state is active-pending, suppress the bp.
     */
    if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) {
        return false;
    }

    /*
     * PC alignment faults have priority over breakpoint exceptions.
     */
    pc = is_a64(env) ? env->pc : env->regs[15];
    if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) {
        return false;
    }

    /*
     * Instruction aborts have priority over breakpoint exceptions.
     * TODO: We would need to look up the page for PC and verify that
     * it is present and executable.
     */

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

/*
 * Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae;

    if (arm_feature(env, ARM_FEATURE_M)) {
        using_lpae = false;
    } else if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
               arm_feature(env, ARM_FEATURE_V8)) {
        using_lpae = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE) &&
               (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
        using_lpae = true;
    } else {
        using_lpae = false;
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception_debug(env, EXCP_DATA_ABORT,
                                  syn_watchpoint(0, 0, wnr));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0));
    }
}

/*
 * Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}
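
/*
 * Raise a software-step debug exception; the translator supplies the
 * syndrome (syn_swstep) and raise_exception_debug() fixes up the EC
 * field's same-EL bit.
 */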
void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
{
    raise_exception_debug(env, EXCP_UDEF, syndrome);
}

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!FIELD_EX64(wcr, DBGWCR, E)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /*
     * Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = FIELD_EX64(wcr, DBGWCR, MASK);
    if (mask == 1 || mask == 2) {
        /*
         * Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /*
         * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = FIELD_EX64(wcr, DBGWCR, BAS);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /*
             * Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /*
         * The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
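
/*
 * Illustrative examples of the two range encodings handled above
 * (register values are hypothetical): MASK = 3 with DBGWVR = 0x100c
 * watches the eight bytes [0x1008, 0x1010); MASK = 0 with BAS = 0b1100
 * watches only bytes 2 and 3 of the word or doubleword selected by
 * DBGWVR.
 */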

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written. It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000 => no breakpoint
         *  0b0011 => breakpoint on addr
         *  0b1100 => breakpoint on addr + 2
         *  0b1111 => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /*
         * We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
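
/*
 * Illustrative example (hypothetical values): DBGBVR = 0x8000 with
 * DBGBCR.BAS = 0b1100 places the QEMU breakpoint at 0x8002, i.e. on
 * a 16-bit instruction in the second halfword of the word.
 */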

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

#if !defined(CONFIG_USER_ONLY)
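
/*
 * Illustrative example (hypothetical addresses): in BE32 mode a
 * one-byte guest access to 0x1001 reaches this point as 0x1002;
 * XORing with 3 recovers 0x1001, the address the guest used, so the
 * watchpoint comparison behaves as the guest expects.
 */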
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

/*
 * Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to Debug Comms Channel registers. If FEAT_FGT
 * is implemented then these are controlled by MDCR_EL2.TDCC for
 * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by
 * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA.
 * For EL0, they are also controlled by MDSCR_EL1.TDCC.
 */
static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdscr_el1_tdcc = extract32(env->cp15.mdscr_el1, 12, 1);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);
    bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (mdcr_el2 & MDCR_TDCC);
    bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (env->cp15.mdcr_el3 & MDCR_TDCC);

    if (el < 1 && mdscr_el1_tdcc) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /*
     * Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
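
/*
 * Illustrative usage: an AArch32 guest sets the OS lock by writing
 * the key value 0xC5ACCE55 (via DBGOSLAR) and clears it by writing
 * any other value; an AArch64 guest writes OSLAR_EL1.OSLK directly.
 * Either way, only the OSLK bit in OSLSR_EL1 is updated.
 */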

static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /*
     * Only defined bit is bit 0 (DLK); if FEAT_DoubleLock is not
     * implemented this is RAZ/WI.
     */
    if (arm_feature(env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_doublelock, cpu)
        : cpu_isar_feature(aa32_doublelock, cpu)) {
        env->cp15.osdlr_el1 = value & 1;
    }
}

static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim |= (value & 0xFF);
}

static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CLAIM bits are RAO */
    return 0xFF;
}

static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim &= ~(value & 0xFF);
}
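
/*
 * Note the asymmetry between the claim-tag accessors above: reads of
 * DBGCLAIMSET are RAO (see dbgclaimset_read()), so a debugger reads
 * the currently set claim tags back through DBGCLAIMCLR.
 */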

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /*
     * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_MDSCR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external
     * Debug Communication Channel is not implemented.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * These registers belong to the Debug Communications Channel,
     * which is not implemented. However we implement RAZ/WI behaviour
     * with trapping to prevent spurious SIGILLs if the guest OS does
     * access them as the support cannot be probed for.
     */
    { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */
    { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0,
      .access = PL0_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * OSECCR_EL1 provides a mechanism for an operating system
     * to access the contents of EDECCR. EDECCR is not implemented though,
     * and neither is the rest of the external debug interface.
     */
    { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_OSECCR_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLAR_EL1,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLSR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .fgt = FGT_OSDLR_EL1,
      .writefn = osdlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) },
    /*
     * Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
    /*
     * Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_NOP },
1047 * "The architecture does not define any functionality for the CLAIM tag bits.", 1048 * so we only keep the raw bits 1049 */ 1050 { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH, 1051 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6, 1052 .type = ARM_CP_ALIAS, 1053 .access = PL1_RW, .accessfn = access_tda, 1054 .fgt = FGT_DBGCLAIM, 1055 .writefn = dbgclaimset_write, .readfn = dbgclaimset_read }, 1056 { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH, 1057 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6, 1058 .access = PL1_RW, .accessfn = access_tda, 1059 .fgt = FGT_DBGCLAIM, 1060 .writefn = dbgclaimclr_write, .raw_writefn = raw_write, 1061 .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) }, 1062 }; 1063 1064 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 1065 /* 64 bit access versions of the (dummy) debug registers */ 1066 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 1067 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 1068 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 1069 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 1070 }; 1071 1072 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1073 uint64_t value) 1074 { 1075 ARMCPU *cpu = env_archcpu(env); 1076 int i = ri->crm; 1077 1078 /* 1079 * Bits [1:0] are RES0. 1080 * 1081 * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA) 1082 * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if 1083 * they contain the value written. It is CONSTRAINED UNPREDICTABLE 1084 * whether the RESS bits are ignored when comparing an address. 1085 * 1086 * Therefore we are allowed to compare the entire register, which lets 1087 * us avoid considering whether or not FEAT_LVA is actually enabled. 1088 */ 1089 value &= ~3ULL; 1090 1091 raw_write(env, ri, value); 1092 if (tcg_enabled()) { 1093 hw_watchpoint_update(cpu, i); 1094 } 1095 } 1096 1097 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1098 uint64_t value) 1099 { 1100 ARMCPU *cpu = env_archcpu(env); 1101 int i = ri->crm; 1102 1103 raw_write(env, ri, value); 1104 if (tcg_enabled()) { 1105 hw_watchpoint_update(cpu, i); 1106 } 1107 } 1108 1109 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1110 uint64_t value) 1111 { 1112 ARMCPU *cpu = env_archcpu(env); 1113 int i = ri->crm; 1114 1115 raw_write(env, ri, value); 1116 if (tcg_enabled()) { 1117 hw_breakpoint_update(cpu, i); 1118 } 1119 } 1120 1121 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1122 uint64_t value) 1123 { 1124 ARMCPU *cpu = env_archcpu(env); 1125 int i = ri->crm; 1126 1127 /* 1128 * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 1129 * copy of BAS[0]. 1130 */ 1131 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 1132 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 1133 1134 raw_write(env, ri, value); 1135 if (tcg_enabled()) { 1136 hw_breakpoint_update(cpu, i); 1137 } 1138 } 1139 1140 void define_debug_regs(ARMCPU *cpu) 1141 { 1142 /* 1143 * Define v7 and v8 architectural debug registers. 1144 * These are just dummy implementations for now. 1145 */ 1146 int i; 1147 int wrps, brps, ctx_cmps; 1148 1149 /* 1150 * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot 1151 * use AArch32. Given that bit 15 is RES1, if the value is 0 then 1152 * the register must not exist for this cpu. 

void define_debug_regs(ARMCPU *cpu)
{
    /*
     * Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32. Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    /*
     * DBGDEVID is present in the v7 debug architecture if
     * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is
     * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist
     * from v7.1 of the debug architecture. Because no fields have yet
     * been defined in DBGDEVID2 (and quite possibly none will ever
     * be) we don't define an ARMISARegisters field for it.
     * These registers exist only if EL1 can use AArch32, but that
     * happens naturally because they are only PL1 accessible anyway.
     */
    if (extract32(cpu->isar.dbgdidr, 15, 1)) {
        ARMCPRegInfo dbgdevid = {
            .name = "DBGDEVID",
            .cp = 14, .opc1 = 0, .crn = 7, .crm = 2, .opc2 = 7,
            .access = PL1_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid,
        };
        define_one_arm_cp_reg(cpu, &dbgdevid);
    }
    if (cpu_isar_feature(aa32_debugv7p1, cpu)) {
        ARMCPRegInfo dbgdevid12[] = {
            {
                .name = "DBGDEVID1",
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1,
            }, {
                .name = "DBGDEVID2",
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 0, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = 0,
            },
        };
        define_arm_cp_regs(cpu, dbgdevid12);
    }

    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
        char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgbvr_el1_name);
        g_free(dbgbcr_el1_name);
    }

    for (i = 0; i < wrps; i++) {
        char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
        char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgwvr_el1_name);
        g_free(dbgwcr_el1_name);
    }
}