/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/system.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_reserved_brps;
static int core_num_wrps;

/* Debug architecture version. */
static u8 debug_arch;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;

#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL); \
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint "
				"register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint "
				"register %d\n", n);
	}
	isb();
}
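
/*
 * Note on the indexing scheme used above: read_wb_reg()/write_wb_reg()
 * take a register number n encoded as (OP2 << 4) + CRm, which the
 * GEN_{READ,WRITE}_WB_REG_CASES tables expand back into the individual
 * ARM_DBG_READ/ARM_DBG_WRITE accessors. The ARM_BASE_BVR, ARM_BASE_BCR,
 * ARM_BASE_WVR and ARM_BASE_WCR constants used throughout this file are
 * assumed (per asm/hw_breakpoint.h) to be defined as (ARM_OP2_* << 4),
 * so that e.g. ARM_BASE_BVR + i selects breakpoint value register i.
 */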

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
		"CPUID feature registers not supported. "
		"Assuming v6 debug is present.\n"))
		return ARM_DEBUG_ARCH_V6;

	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

static int debug_arch_supported(void)
{
	u8 arch = get_debug_arch();
	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * FIXME: When a watchpoint fires, the only way to work out which
	 * watchpoint it was is by disassembling the faulting instruction
	 * and working out the address of the memory access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */

#if 0
	int wrps;
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	wrps = ((didr >> 28) & 0xf) + 1;
#endif
	int wrps = 1;

	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
		wrps = get_num_brp_resources() - 1;

	return wrps;
}

/* We reserve one breakpoint for each watchpoint. */
static int get_num_reserved_brps(void)
{
	if (core_has_mismatch_brps())
		return get_num_wrps();
	return 0;
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	if (core_has_mismatch_brps())
		brps -= get_num_reserved_brps();
	return brps;
}
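
/*
 * Worked example of the partitioning above, assuming a v7 core that
 * reports N > 1 BRP resources and supports mismatch breakpoints:
 * get_num_wrps() returns 1, get_num_reserved_brps() therefore reserves
 * one BRP for watchpoint single-step, and get_num_brps() exposes the
 * remaining N - 1 BRPs (e.g. N = 6 gives 5 breakpoints, 1 watchpoint
 * and 1 reserved breakpoint).
 */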

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	int ret = 0;

	ARM_DBG_READ(c1, 0, dscr);

	/* Ensure that halting mode is disabled. */
	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
		"halting debug mode enabled. Unable to access hardware resources.\n")) {
		ret = -EPERM;
		goto out;
	}

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN))
		ret = -EPERM;

out:
	return ret;
}
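
/*
 * A note on the two DSCR encodings used in enable_monitor_mode(): per the
 * v7 debug architecture, the DSCR read back at (c1, 0) is the read-only
 * internal view, which is the usual reason the MDBGEN write on v7-ECP14
 * cores is issued through the external view at (c2, 2) instead, while
 * v6/v6.1 debug accepts the write at (c1, 0) directly.
 */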

int hw_breakpoint_slots(int type)
{
	if (!debug_arch_supported())
		return 0;

	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
		if (info->step_ctrl.enabled) {
			/* Override the breakpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		}
	} else {
		/* Watchpoint */
		if (info->step_ctrl.enabled) {
			/* Install into the reserved breakpoint region. */
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
			/* Override the watchpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		} else {
			ctrl_base = ARM_BASE_WCR;
			val_base = ARM_BASE_WVR;
		}
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
		ret = -EBUSY;
		goto out;
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		if (info->step_ctrl.enabled)
			base = ARM_BASE_BCR + core_num_brps;
		else
			base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return;

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}
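
/*
 * Illustration of the slot layout assumed by the install/uninstall
 * routines above: with, say, 5 usable BRPs and 1 reserved BRP, a
 * stepped watchpoint is moved out of its WVR/WCR slot and temporarily
 * parked in the reserved region at ARM_BASE_BCR/BVR + core_num_brps
 * (i.e. hardware breakpoint slot 5), so it never competes with the
 * ordinary breakpoint slots 0-4.
 */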

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
		/* Otherwise, fall through and reject the request. */
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
		/* Fall through. */
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
		/* Fall through. */
	default:
		ret = -EINVAL;
		goto out;
	}

	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	/*
	 * Currently we rely on an overflow handler to take
	 * care of single-stepping the breakpoint when it fires.
	 * In the case of userspace breakpoints on a core with V7 debug,
	 * we can use the mismatch feature as a poor-man's hardware
	 * single-step, but this only works for per-task breakpoints.
	 */
	if (WARN_ONCE(!bp->overflow_handler &&
		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
		 || !bp->hw.bp_target),
			"overflow handler required but none found\n")) {
		ret = -EINVAL;
	}
out:
	return ret;
}
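
#if 0
/*
 * Illustrative sketch only (not part of this driver): a typical kernel
 * consumer of the checks above, modelled on
 * samples/hw_breakpoint/data_breakpoint.c. It registers a wide (per-CPU)
 * kernel data watchpoint and supplies the overflow handler that
 * arch_validate_hwbkpt_settings() insists on for kernel addresses. The
 * watched symbol is an arbitrary example, the handler prototype follows
 * the perf_overflow_handler_t of this kernel generation and may differ
 * elsewhere, and <linux/kallsyms.h> would also be needed. Teardown would
 * use unregister_wide_hw_breakpoint(sample_hbp).
 */
static struct perf_event * __percpu *sample_hbp;

static void sample_hbp_handler(struct perf_event *bp, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	/* Runs on every hit; its presence satisfies the check above. */
	pr_info("watched kernel symbol was accessed\n");
}

static int __init sample_hbp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("jiffies");
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

	sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler);
	if (IS_ERR((void __force *)sample_hbp))
		return PTR_ERR((void __force *)sample_hbp);

	return 0;
}
#endif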

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch = 1;
	info->step_ctrl.len = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled = 1;
	info->trigger = addr;
	arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}

static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	/* Without a disassembler, we can only handle 1 watchpoint. */
	BUG_ON(core_num_wrps > 1);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL) {
			rcu_read_unlock();
			continue;
		}

		/*
		 * The DFAR is an unknown value. Since we only allow a
		 * single watchpoint, we can set the trigger to the lowest
		 * possible faulting address.
		 */
		info = counter_arch_bp(wp);
		info->trigger = wp->attr.bp_addr;
		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

		rcu_read_unlock();
	}
}

static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_reserved_brps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}

static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}
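
/*
 * Worked example for the byte-address-select check in breakpoint_handler()
 * above: a Thumb breakpoint placed at 0x8002 ends up with
 * info->address = 0x8000 and ctrl.len = ARM_BREAKPOINT_LEN_2 << 2
 * (0xc, assuming the usual 0x3 byte-address-select encoding for LEN_2),
 * so a debug exception at 0x8002 evaluates (1 << 2) & 0xc and matches,
 * while an exception at 0x8000 would not match this slot.
 */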

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	preempt_disable();

	if (interrupts_enabled(regs))
		local_irq_enable();

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
		/* Fall through. */
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	preempt_enable();

	return ret;
}

/*
 * One-time initialisation.
 */
static void reset_ctrl_regs(void *info)
{
	int i, cpu = smp_processor_id();
	u32 dbg_power;
	cpumask_t *cpumask = info;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving the debug
	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
	 * the debug registers out of reset, so we must unlock the OS Lock
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
		if ((dbg_power & 0x1) == 0) {
			pr_warning("CPU %d debug is powered down!\n", cpu);
			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
			return;
		}

		/*
		 * Unconditionally clear the lock by writing a value
		 * other than 0xC5ACCE55 to the access register.
		 */
		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
		isb();

		/*
		 * Clear any configured vector-catch events before
		 * enabling monitor mode.
		 */
		asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
		isb();
	}

	if (enable_monitor_mode())
		return;

	/* We must also reset any reserved registers. */
	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}
}

static int __cpuinit dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
{
	if (action == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

static int __init arch_hw_breakpoint_init(void)
{
	u32 dscr;
	cpumask_t cpumask = { CPU_BITS_NONE };

	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_reserved_brps = get_num_reserved_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps + core_num_reserved_brps, core_num_wrps);

	if (core_num_reserved_brps)
		pr_info("%d breakpoint(s) reserved for watchpoint "
				"single-step.\n", core_num_reserved_brps);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
	if (!cpumask_empty(&cpumask)) {
		core_num_brps = 0;
		core_num_reserved_brps = 0;
		core_num_wrps = 0;
		return 0;
	}

	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
		max_watchpoint_len = 4;
		pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
				max_watchpoint_len);
	} else {
		/* Work out the maximum supported watchpoint length. */
		max_watchpoint_len = get_max_wp_len();
		pr_info("maximum watchpoint size is %u bytes.\n",
			max_watchpoint_len);
	}

	/* Register debug fault handler. */
	hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			"watchpoint debug exception");
	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			"breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}