/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "cpregs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"


/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    if (arm_is_el2_enabled(env)) {
        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
                       env->cp15.mdcr_el2 & MDCR_TDE;
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

/*
 * Raise an exception to the debug target el.
 * Modify syndrome to indicate when origin and target EL are the same.
 */
G_NORETURN static void
raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /*
     * If singlestep is targeting a lower EL than the current one, then
     * DisasContext.ss_active must be false and we can never get here.
     * Similarly for watchpoint and breakpoint matches.
     */
    assert(debug_el >= cur_el);
    syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT;
    raise_exception(env, excp, syndrome, debug_el);
}

/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
static bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    int debug_el;

    if (cur_el == 3) {
        return false;
    }

    /* MDCR_EL3.SDD disables debug events from Secure state */
    if (arm_is_secure_below_el3(env)
        && extract32(env->cp15.mdcr_el3, 16, 1)) {
        return false;
    }

    /*
     * Same EL to same EL debug exceptions need MDSCR_KDE enabled
     * while not masking the (D)ebug bit in DAIF.
     */
    debug_el = arm_debug_target_el(env);

    if (cur_el == debug_el) {
        return extract32(env->cp15.mdscr_el1, 13, 1)
            && !(env->daif & PSTATE_D);
    }

    /* Otherwise the debug target needs to be a higher EL */
    return debug_el > cur_el;
}

static bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /*
             * SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /*
             * For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}
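
/*
 * Illustrative note (configuration assumed for the example, not used by
 * the code): with the CPU in Non-secure EL1 and both HCR_EL2.TGE and
 * MDCR_EL2.TDE clear, arm_debug_target_el() returns 1, so a same-EL
 * debug exception is only generated while MDSCR_EL1.KDE == 1 and
 * PSTATE.D == 0, per aa64_generate_debug_exceptions() above.
 */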

/*
 * Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *   if UsingAArch32() then
 *       return AArch32.GenerateDebugExceptions()
 *   else
 *       return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 */
bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) {
        return false;
    }
    if (is_a64(env)) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/*
 * Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = arm_num_brps(cpu);
    int ctx_cmps = arm_num_ctx_cmps(cpu);
    int bt;
    uint32_t contextidr;
    uint64_t hcr_el2;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn >= brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled: generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);
    hcr_el2 = arm_hcr_el2_eff(env);

    switch (bt) {
    case 3: /* linked context ID match */
        switch (arm_current_el(env)) {
        default:
            /* Context matches never fire in AArch64 EL3 */
            return false;
        case 2:
            if (!(hcr_el2 & HCR_E2H)) {
                /* Context matches never fire in EL2 without E2H enabled. */
                return false;
            }
            contextidr = env->cp15.contextidr_el[2];
            break;
        case 1:
            contextidr = env->cp15.contextidr_el[1];
            break;
        case 0:
            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                contextidr = env->cp15.contextidr_el[2];
            } else {
                contextidr = env->cp15.contextidr_el[1];
            }
            break;
        }
        break;

    case 7: /* linked contextidr_el1 match */
        contextidr = env->cp15.contextidr_el[1];
        break;
    case 13: /* linked contextidr_el2 match */
        contextidr = env->cp15.contextidr_el[2];
        break;

    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 15: /* linked full context ID match */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
}
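
/*
 * Worked example (register values assumed for illustration): a
 * watchpoint programmed with DBGWCR<n>.WT = 1 and DBGWCR<n>.LBN = m
 * only reports a match if DBGBCR<m> is enabled and programmed with a
 * context-matching breakpoint type, e.g. BT = 0b0011 (linked context
 * ID match) with the current CONTEXTIDR equal to the low 32 bits of
 * DBGBVR<m>, as checked above.
 */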

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = FIELD_EX64(cr, DBGWCR, PAC);
    hmc = FIELD_EX64(cr, DBGWCR, HMC);
    ssc = FIELD_EX64(cr, DBGWCR, SSC);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = FIELD_EX64(cr, DBGWCR, WT);
    lbn = FIELD_EX64(cr, DBGWCR, LBN);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
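
/*
 * Examples of the {HMC, SSC, PAC} decode above, with field values
 * assumed for illustration:
 *  - HMC = 0, SSC = 0b00, PAC = 0b10: matches EL0 accesses only.
 *  - HMC = 0, SSC = 0b00, PAC = 0b01: matches EL1 accesses only.
 *  - HMC = 1, SSC = 0b00, PAC = 0b11: matches at all exception levels.
 */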

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong pc;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    /*
     * Single-step exceptions have priority over breakpoint exceptions.
     * If single-step state is active-pending, suppress the bp.
     */
    if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) {
        return false;
    }

    /*
     * PC alignment faults have priority over breakpoint exceptions.
     */
    pc = is_a64(env) ? env->pc : env->regs[15];
    if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) {
        return false;
    }

    /*
     * Instruction aborts have priority over breakpoint exceptions.
     * TODO: We would need to look up the page for PC and verify that
     * it is present and executable.
     */

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

/*
 * Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
               arm_feature(env, ARM_FEATURE_V8)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
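
/*
 * Illustrative outcome (configuration assumed): for a debug exception
 * taken to AArch32 EL1 using the short-descriptor translation format,
 * arm_fi_to_sfsc() yields the "debug event" fault status 0b00010;
 * with the long-descriptor (LPAE) format, arm_fi_to_lfsc() yields the
 * status code 0b100010 instead.
 */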

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception_debug(env, EXCP_DATA_ABORT,
                                  syn_watchpoint(0, 0, wnr));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0));
    }
}

/*
 * Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
{
    raise_exception_debug(env, EXCP_UDEF, syndrome);
}

/*
 * Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/*
 * Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
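
/*
 * Note on ordering (illustrative, with trap settings assumed): the
 * access functions above check the EL2 trap before the EL3 trap, so an
 * EL1 access to a debug register with both MDCR_EL2.TDA and
 * MDCR_EL3.TDA set is reported as CP_ACCESS_TRAP_EL2.
 */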

/*
 * Check for traps to Debug Comms Channel registers. If FEAT_FGT
 * is implemented then these are controlled by MDCR_EL2.TDCC for
 * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by
 * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA.
 */
static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);
    bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (mdcr_el2 & MDCR_TDCC);
    bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        (env->cp15.mdcr_el3 & MDCR_TDCC);

    if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /*
     * Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /*
     * Only defined bit is bit 0 (DLK); if Feat_DoubleLock is not
     * implemented this is RAZ/WI.
     */
    if (arm_feature(env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_doublelock, cpu)
        : cpu_isar_feature(aa32_doublelock, cpu)) {
        env->cp15.osdlr_el1 = value & 1;
    }
}

static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim |= (value & 0xFF);
}

static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CLAIM bits are RAO */
    return 0xFF;
}

static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.dbgclaim &= ~(value & 0xFF);
}
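
/*
 * Worked example of the OS lock handshake modelled above (the key is
 * the architectural value; the scenario is illustrative): AArch32
 * software sets the lock by writing the key 0xC5ACCE55 to DBGOSLAR,
 * while AArch64 software writes OSLAR_EL1 with bit 0 set; either way
 * the lock state then reads back as OSLSR_EL1 bit 1.
 */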

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /*
     * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_MDSCR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external
     * Debug Communication Channel is not implemented.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * OSDTRRX_EL1/OSDTRTX_EL1 are used for save and restore of DBGDTRRX_EL0.
     * They are components of the Debug Communications Channel, which is not
     * implemented.
     */
    { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * OSECCR_EL1 provides a mechanism for an operating system
     * to access the contents of EDECCR. EDECCR is not implemented,
     * though, and neither is the rest of the external debug mechanism.
     */
    { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_OSECCR_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLAR_EL1,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fgt = FGT_OSLSR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .fgt = FGT_OSDLR_EL1,
      .writefn = osdlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) },
    /*
     * Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
    /*
     * Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_NOP },
    /*
     * Dummy DBGCLAIM registers.
     * "The architecture does not define any functionality for the CLAIM
     * tag bits", so we only keep the raw bits.
     */
    { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimset_write, .readfn = dbgclaimset_read },
    { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_tda,
      .fgt = FGT_DBGCLAIM,
      .writefn = dbgclaimclr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) },
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!FIELD_EX64(wcr, DBGWCR, E)) {
        /* E bit clear: watchpoint disabled */
        return;
    }

    switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /*
     * Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = FIELD_EX64(wcr, DBGWCR, MASK);
    if (mask == 1 || mask == 2) {
        /*
         * Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /*
         * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = FIELD_EX64(wcr, DBGWCR, BAS);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /*
             * Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /*
         * The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
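
/*
 * Worked examples for hw_watchpoint_update() (register values assumed
 * for illustration):
 *  - WCR.MASK = 12, WVR = 0x4567: len = 1 << 12 and the low 12 bits of
 *    WVR are masked off, so the watchpoint covers [0x4000, 0x4fff].
 *  - WCR.MASK = 0, WVR bit [2] = 0, BAS = 0b00110000: basstart = 4 and
 *    len = 2, so the watchpoint covers the two bytes at WVR + 4.
 */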

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /*
     * Bits [1:0] are RES0.
     *
     * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
     * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
     * they contain the value written. It is CONSTRAINED UNPREDICTABLE
     * whether the RESS bits are ignored when comparing an address.
     *
     * Therefore we are allowed to compare the entire register, which lets
     * us avoid considering whether or not FEAT_LVA is actually enabled.
     */
    value &= ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear: breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written. It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /*
         * We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no
         * events for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
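
/*
 * Illustrative case for the BAS handling above (values assumed): with
 * DBGBVR = 0x1000 and BAS = 0b1100, the QEMU breakpoint is inserted at
 * 0x1002, i.e. on the second halfword of the 4-byte-aligned pair, as
 * used for 16-bit Thumb instructions.
 */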

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /*
     * Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /*
     * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
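
/*
 * Example of the BAS write constraint above (written values assumed
 * for illustration): BAS occupies DBGBCR bits [8:5], so a write of
 * BAS = 0b0001 reads back as 0b0011, and a write of BAS = 0b0100 reads
 * back as 0b1100. This is what limits hw_breakpoint_update() to the
 * four BAS cases listed there.
 */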

void define_debug_regs(ARMCPU *cpu)
{
    /*
     * Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32. Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    /*
     * DBGDEVID is present in the v7 debug architecture if
     * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is
     * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist
     * from v7.1 of the debug architecture. Because no fields have yet
     * been defined in DBGDEVID2 (and quite possibly none will ever
     * be) we don't define an ARMISARegisters field for it.
     * These registers exist only if EL1 can use AArch32, but that
     * happens naturally because they are only PL1 accessible anyway.
     */
    if (extract32(cpu->isar.dbgdidr, 15, 1)) {
        ARMCPRegInfo dbgdevid = {
            .name = "DBGDEVID",
            .cp = 14, .opc1 = 0, .crm = 7, .opc2 = 2, .crn = 7,
            .access = PL1_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid,
        };
        define_one_arm_cp_reg(cpu, &dbgdevid);
    }
    if (cpu_isar_feature(aa32_debugv7p1, cpu)) {
        ARMCPRegInfo dbgdevid12[] = {
            {
                .name = "DBGDEVID1",
                .cp = 14, .opc1 = 0, .crm = 7, .opc2 = 1, .crn = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1,
            }, {
                .name = "DBGDEVID2",
                .cp = 14, .opc1 = 0, .crm = 7, .opc2 = 0, .crn = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = 0,
            },
        };
        define_arm_cp_regs(cpu, dbgdevid12);
    }

    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
        char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgbvr_el1_name);
        g_free(dbgbcr_el1_name);
    }

    for (i = 0; i < wrps; i++) {
        char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
        char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgwvr_el1_name);
        g_free(dbgwcr_el1_name);
    }
}
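
/*
 * Note on the encoding above (brps = 6 is an assumed value, for
 * illustration only): for brps = 6 this defines DBGBVR0_EL1 through
 * DBGBVR5_EL1 (and the matching DBGBCRn_EL1), with the breakpoint
 * index carried in .crm; the write functions then recover the index
 * via ri->crm before calling hw_breakpoint_update().
 */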

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here. For example (addresses illustrative), a
     * one-byte access to 0x1003 arrives here as 0x1000, and XORing with
     * 3 recovers the original address for the watchpoint comparison.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

#endif