/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "cpregs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/tcg.h"

#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    if (arm_is_el2_enabled(env)) {
        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
                       env->cp15.mdcr_el2 & MDCR_TDE;
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

/*
 * Raise an exception to the debug target el.
 * Modify syndrome to indicate when origin and target EL are the same.
 */
G_NORETURN static void
raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /*
     * If singlestep is targeting a lower EL than the current one, then
     * DisasContext.ss_active must be false and we can never get here.
     * Similarly for watchpoint and breakpoint matches.
     */
    assert(debug_el >= cur_el);
    syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT;
    raise_exception(env, excp, syndrome, debug_el);
}
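
/*
 * The debug exception EC values come in lower-EL/same-EL pairs that
 * differ only in bit 0 of the EC field: 0x30/0x31 (breakpoint),
 * 0x32/0x33 (software step), 0x34/0x35 (watchpoint). The syndromes
 * passed in here encode the lower-EL variant, so ORing in a 1 at
 * ARM_EL_EC_SHIFT when debug_el == cur_el converts, for example,
 * EC 0x30 into EC 0x31.
 */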

/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
static bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    int debug_el;

    if (cur_el == 3) {
        return false;
    }

    /* MDCR_EL3.SDD disables debug events from Secure state */
    if (arm_is_secure_below_el3(env)
        && extract32(env->cp15.mdcr_el3, 16, 1)) {
        return false;
    }

    /*
     * Same EL to same EL debug exceptions need MDSCR_KDE enabled
     * while not masking the (D)ebug bit in DAIF.
     */
    debug_el = arm_debug_target_el(env);

    if (cur_el == debug_el) {
        return extract32(env->cp15.mdscr_el1, 13, 1)
            && !(env->daif & PSTATE_D);
    }

    /* Otherwise the debug target needs to be a higher EL */
    return debug_el > cur_el;
}

static bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /*
             * SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /*
             * For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}

/*
 * Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *   if UsingAArch32() then
 *       return AArch32.GenerateDebugExceptions()
 *   else
 *       return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 */
bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) {
        return false;
    }
    if (is_a64(env)) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/*
 * Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = arm_num_brps(cpu);
    int ctx_cmps = arm_num_ctx_cmps(cpu);
    int bt;
    uint32_t contextidr;
    uint64_t hcr_el2;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn >= brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);
    hcr_el2 = arm_hcr_el2_eff(env);

    switch (bt) {
    case 3: /* linked context ID match */
        switch (arm_current_el(env)) {
        default:
            /* Context matches never fire in AArch64 EL3 */
            return false;
        case 2:
            if (!(hcr_el2 & HCR_E2H)) {
                /* Context matches never fire in EL2 without E2H enabled. */
                return false;
            }
            contextidr = env->cp15.contextidr_el[2];
            break;
        case 1:
            contextidr = env->cp15.contextidr_el[1];
            break;
        case 0:
            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                contextidr = env->cp15.contextidr_el[2];
            } else {
                contextidr = env->cp15.contextidr_el[1];
            }
            break;
        }
        break;

    case 7: /* linked contextidr_el1 match */
        contextidr = env->cp15.contextidr_el[1];
        break;
    case 13: /* linked contextidr_el2 match */
        contextidr = env->cp15.contextidr_el[2];
        break;

    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 15: /* linked full context ID match */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
}
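
/*
 * Example of the range check above: on a hypothetical CPU with six
 * breakpoints of which two are context-aware (brps == 6, ctx_cmps == 2),
 * the context-aware breakpoints are the highest-numbered ones, so the
 * only LBN values that can pass the check are 4 and 5.
 */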

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDTR/STTR "unprivileged access" instructions
             * should match watchpoints as if they were accesses done at EL0,
             * even if the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = FIELD_EX64(cr, DBGWCR, PAC);
    hmc = FIELD_EX64(cr, DBGWCR, HMC);
    ssc = FIELD_EX64(cr, DBGWCR, SSC);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = FIELD_EX64(cr, DBGWCR, WT);
    lbn = FIELD_EX64(cr, DBGWCR, LBN);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
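
/*
 * For example, a DBGWCR value with SSC == 0b00, HMC == 0 and PAC == 0b10
 * passes the checks above only for accesses made at (or, for unprivileged
 * loads/stores, as if at) EL0, in either security state; setting HMC == 1
 * instead extends the match to EL2 and EL3 as well.
 */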

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong pc;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    /*
     * Single-step exceptions have priority over breakpoint exceptions.
     * If single-step state is active-pending, suppress the bp.
     */
    if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) {
        return false;
    }

    /*
     * PC alignment faults have priority over breakpoint exceptions.
     */
    pc = is_a64(env) ? env->pc : env->regs[15];
    if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) {
        return false;
    }

    /*
     * Instruction aborts have priority over breakpoint exceptions.
     * TODO: We would need to look up the page for PC and verify that
     * it is present and executable.
     */

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

/*
 * Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
               arm_feature(env, ARM_FEATURE_V8)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
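
/*
 * For a debug exception the resulting FSR is the "Debug event" fault
 * status code in the selected format: 0b100010 (0x22) in the LPAE
 * long-descriptor encoding, or 0b00010 (0x02) in the short-descriptor
 * encoding.
 */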

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception_debug(env, EXCP_DATA_ABORT,
                                  syn_watchpoint(0, 0, wnr));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         *     since singlestep is also done by generating a debug internal
         *     exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0));
    }
}

/*
 * Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
{
    raise_exception_debug(env, EXCP_UDEF, syndrome);
}

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!FIELD_EX64(wcr, DBGWCR, E)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /*
     * Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = FIELD_EX64(wcr, DBGWCR, MASK);
    if (mask == 1 || mask == 2) {
        /*
         * Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /*
         * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = FIELD_EX64(wcr, DBGWCR, BAS);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /*
             * Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /*
         * The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
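
/*
 * Two examples of the decode above, assuming a hypothetical WVR value
 * of 0x1000:
 *  - BAS == 0b00111100, MASK == 0: basstart == 2 and len == 4, so the
 *    watchpoint covers the four bytes 0x1002..0x1005.
 *  - MASK == 3 (BAS ignored): len == 8, so the watchpoint covers the
 *    8-byte-aligned region 0x1000..0x1007.
 */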

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written. It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /*
         * We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
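
/*
 * For instance, with a hypothetical BVR value of 0x8000, BAS == 0b0011
 * breakpoints the Thumb insn at 0x8000 and BAS == 0b1100 the one at
 * 0x8002, while BAS == 0b1111 breakpoints an A32 insn at 0x8000.
 */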

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}
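
/*
 * Example: in BE32 mode a byte access to guest address 0x1001 is
 * performed as an access to 0x1002 (0x1001 XOR 3); XORing with 3 again
 * here recovers 0x1001, so a watchpoint set on that address matches.
 */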

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

/*
 * Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to Debug Comms Channel registers. If FEAT_FGT
 * is implemented then these are controlled by MDCR_EL2.TDCC for
 * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by
 * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA.
 * For EL0, they are also controlled by MDSCR_EL1.TDCC.
 */
static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdscr_el1_tdcc = extract32(env->cp15.mdscr_el1, 12, 1);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);
    bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (mdcr_el2 & MDCR_TDCC);
    bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (env->cp15.mdcr_el3 & MDCR_TDCC);

    if (el < 1 && mdscr_el1_tdcc) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
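
/*
 * For example, an EL0 access to a DCC register such as MDCCSR_EL0 is
 * trapped when MDSCR_EL1.TDCC is set, and EL0/EL1 accesses also trap
 * to EL2 when MDCR_EL2.TDA or (with FEAT_FGT) MDCR_EL2.TDCC is set.
 */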

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /*
     * Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /*
     * Only defined bit is bit 0 (DLK); if FEAT_DoubleLock is not
     * implemented this is RAZ/WI.
     */
    if (arm_feature(env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_doublelock, cpu)
        : cpu_isar_feature(aa32_doublelock, cpu)) {
        env->cp15.osdlr_el1 = value & 1;
    }
}

static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim |= (value & 0xFF);
}

static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CLAIM bits are RAO */
    return 0xFF;
}

static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim &= ~(value & 0xFF);
}
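
/*
 * Example of the set/clear behaviour: after a guest writes 0x03 to
 * DBGCLAIMSET_EL1 and then 0x01 to DBGCLAIMCLR_EL1, a read of
 * DBGCLAIMCLR_EL1 returns 0x02 (reads of DBGCLAIMSET_EL1 are always
 * 0xFF, RAO).
 */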

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /*
     * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_MDSCR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external
     * Debug Communication Channel is not implemented.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * These registers belong to the Debug Communications Channel,
     * which is not implemented. However we implement RAZ/WI behaviour
     * with trapping to prevent spurious SIGILLs if the guest OS does
     * access them as the support cannot be probed for.
     */
    { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */
    { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0,
      .access = PL0_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * OSECCR_EL1 provides a mechanism for an operating system
     * to access the contents of EDECCR. EDECCR is not implemented though,
     * nor is the rest of the external device mechanism.
     */
    { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_OSECCR_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLAR_EL1,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLSR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .fgt = FGT_OSDLR_EL1,
      .writefn = osdlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) },
    /*
     * Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
    /*
     * Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGCLAIM registers.
     * "The architecture does not define any functionality for the CLAIM
     * tag bits", so we only keep the raw bits.
     */
    { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimset_write, .readfn = dbgclaimset_read },
    { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimclr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) },
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
};

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /*
     * Bits [1:0] are RES0.
     *
     * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
     * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
     * they contain the value written. It is CONSTRAINED UNPREDICTABLE
     * whether the RESS bits are ignored when comparing an address.
     *
     * Therefore we are allowed to compare the entire register, which lets
     * us avoid considering whether or not FEAT_LVA is actually enabled.
     */
    value &= ~3ULL;

    raw_write(env, ri, value);
    if (tcg_enabled()) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    if (tcg_enabled()) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    if (tcg_enabled()) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /*
     * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    if (tcg_enabled()) {
        hw_breakpoint_update(cpu, i);
    }
}
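
/*
 * For example, a guest write of BAS == 0b0101 to one of these registers
 * is stored as BAS == 0b1111, and BAS == 0b0001 as BAS == 0b0011; this
 * is what guarantees the four-case BAS enumeration that
 * hw_breakpoint_update() relies on.
 */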

void define_debug_regs(ARMCPU *cpu)
{
    /*
     * Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32. Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    /*
     * DBGDEVID is present in the v7 debug architecture if
     * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is
     * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist
     * from v7.1 of the debug architecture. Because no fields have yet
     * been defined in DBGDEVID2 (and quite possibly none will ever
     * be) we don't define an ARMISARegisters field for it.
     * These registers exist only if EL1 can use AArch32, but that
     * happens naturally because they are only PL1 accessible anyway.
     */
    if (extract32(cpu->isar.dbgdidr, 15, 1)) {
        ARMCPRegInfo dbgdevid = {
            .name = "DBGDEVID",
            .cp = 14, .opc1 = 0, .crn = 7, .crm = 2, .opc2 = 7,
            .access = PL1_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid,
        };
        define_one_arm_cp_reg(cpu, &dbgdevid);
    }
    if (cpu_isar_feature(aa32_debugv7p1, cpu)) {
        ARMCPRegInfo dbgdevid12[] = {
            {
                .name = "DBGDEVID1",
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1,
            }, {
                .name = "DBGDEVID2",
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 0, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = 0,
            },
        };
        define_arm_cp_regs(cpu, dbgdevid12);
    }

    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
        char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgbvr_el1_name);
        g_free(dbgbcr_el1_name);
    }

    for (i = 0; i < wrps; i++) {
        char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
        char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgwvr_el1_name);
        g_free(dbgwcr_el1_name);
    }
}