// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;	/* no instruction breakpoints available */
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!info->perf_single_step)
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};
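
/*
 * Breakpoints requested via ptrace and via perf are tracked separately
 * below: task_bps holds per-task breakpoints and cpu_bps holds per-CPU
 * ones. arch_reserve_bp_slot() consults both lists so that a request from
 * one infrastructure whose HW_BREAKPOINT_SIZE-rounded address range
 * overlaps a breakpoint owned by the other can be refused up front.
 */
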
/*
 * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
 * rely on it safely synchronizing internals here; however, we can rely on it
 * not requesting more breakpoints than available.
 */
static DEFINE_SPINLOCK(cpu_bps_lock);
static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static DEFINE_SPINLOCK(task_bps_lock);
static LIST_HEAD(task_bps);

static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}

static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}

static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	spin_lock(&task_bps_lock);
	list_add(&tmp->list, &task_bps);
	spin_unlock(&task_bps_lock);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	spin_lock(&task_bps_lock);
	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
	spin_unlock(&task_bps_lock);
}

/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;
	bool ret = false;

	spin_lock(&task_bps_lock);
	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&task_bps_lock);
	return ret;
}
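
/*
 * Concrete example of the can_co_exist() rule, using hypothetical
 * addresses and the 8-byte HW_BREAKPOINT_SIZE granule: a ptrace
 * watchpoint on [0x1000, 0x1008) and a perf watchpoint with
 * bp_addr = 0x1006, bp_len = 4 (rounded out to [0x1000, 0x1010))
 * overlap once both are aligned, so only one of the two may hold
 * a slot.
 */
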
/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;
	bool ret = false;

	spin_lock(&task_bps_lock);
	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&task_bps_lock);
	return ret;
}

static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	bool ret = false;
	int i;

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
	return ret;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}

int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}

void arch_release_bp_slot(struct perf_event *bp)
{
	if (!is_kernel_addr(bp->attr.bp_addr)) {
		if (bp->hw.target)
			task_bps_remove(bp);
		if (bp->cpu != -1)
			cpu_bps_remove(bp);
	}
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}
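
/*
 * The maximum watchpoint length depends on the facility in use (see
 * hw_breakpoint_validate_len() below): a classic DABR covers at most
 * DABR_MAX_LEN bytes, a DAWR covers up to DAWR_MAX_LEN bytes (and must
 * not cross a 512-byte boundary before ISA v3.1), while the 8xx can
 * cover an arbitrary range up to U16_MAX bytes.
 */
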
/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length such that the next doubleword also gets
 * covered. For example:
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *    <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross a 512-byte boundary on P10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can setup a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}
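
/*
 * For reference, the parse/validate path above is normally reached
 * through the generic hw_breakpoint API rather than called directly.
 * A minimal in-kernel sketch (names such as "wp_handler" and the
 * watched symbol are placeholders; see
 * samples/hw_breakpoint/data_breakpoint.c for a complete example):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched_variable;
 *	attr.bp_len = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 */
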
/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 *
 * The perf watchpoint will simply re-trigger once the thread is started again,
 * and the watchpoint handler will set up MSR_SE and perf_single_step as
 * needed.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);

		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
			goto reset;
	}
	return;

reset:
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		info->perf_single_step = false;
	}
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

static bool is_octword_vsx_instr(int type, int size)
{
	return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     counter_arch_bp(bp)->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   counter_arch_bp(bp)->address);
	perf_event_disable_inatomic(bp);
}

static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     int *hit, ppc_inst_t instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions; instead, single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;

			counter_arch_bp(bp[i])->perf_single_step = true;
			bp[i] = NULL;
		}
		regs_set_return_msr(regs, regs->msr | MSR_SE);
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i]);
			bp[i] = NULL;
		}
		return false;
	}
	return true;
}
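
/*
 * Power10 DD1 can raise a spurious DAWR match for an octword (32-byte)
 * VSX access that is close to, but does not overlap, the watched range.
 * When the reported effective address is consistent with that erratum,
 * mark every installed breakpoint as hit but flag it
 * HW_BRK_TYPE_EXTRANEOUS_IRQ, so the exception is consumed (and the
 * breakpoints re-armed) without reporting a perf event.
 */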
static void handle_p10dd1_spurious_exception(struct perf_event **bp,
					     int *hit, unsigned long ea)
{
	int i;
	unsigned long hw_end_addr;

	/*
	 * Handle spurious exception only when any bp_per_reg is set.
	 * Otherwise this might be created by xmon and not actually a
	 * spurious exception.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		struct arch_hw_breakpoint *info;

		if (!bp[i])
			continue;

		info = counter_arch_bp(bp[i]);

		hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

		/*
		 * Ending address of DAWR range is less than starting
		 * address of op.
		 */
		if ((hw_end_addr - 1) >= ea)
			continue;

		/*
		 * Those addresses need to be in the same or in two
		 * consecutive 512B blocks.
		 */
		if (((hw_end_addr - 1) >> 10) != (ea >> 10))
			continue;

		/*
		 * 'op address + 64B' generates an address that has a
		 * carry into bit 52 (crosses 2K boundary).
		 */
		if ((ea & 0x800) == ((ea + 64) & 0x800))
			continue;

		break;
	}

	if (i == nr_wp_slots())
		return;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (bp[i]) {
			hit[i] = 1;
			counter_arch_bp(bp[i])->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		}
	}
}

int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea = 0;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		struct arch_hw_breakpoint *info;

		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info = counter_arch_bp(bp[i]);
		info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i]);
				bp[i] = NULL;
				err = true;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		/* Workaround for Power10 DD1 */
		if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
		    is_octword_vsx_instr(type, size)) {
			handle_p10dd1_spurious_exception(bp, hit, ea);
		} else {
			rc = NOTIFY_DONE;
			goto out;
		}
	}

	/*
	 * Return early after invoking the user-callback function without
	 * restoring DABR if the breakpoint is from ptrace, which always
	 * operates in one-shot mode. The ptrace-ed process will receive the
	 * SIGTRAP signal generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			bp[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i]);
				bp[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(counter_arch_bp(bp[i])->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!bp[i])
			continue;
		__set_breakpoint(i, counter_arch_bp(bp[i]));
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);
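
/*
 * For a user-space hit, hw_breakpoint_handler() does not emulate the
 * instruction: it sets MSR_SE and perf_single_step and leaves the debug
 * register disarmed. The resulting single-step trap is handled below,
 * where the perf event is finally delivered and the breakpoint re-armed.
 */
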
/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (int i = 0; i < nr_wp_slots(); i++) {
		struct perf_event *bp;
		struct arch_hw_breakpoint *info;

		bp = __this_cpu_read(bp_per_reg[i]);

		if (!bp)
			continue;

		info = counter_arch_bp(bp);

		if (!info->perf_single_step)
			continue;

		found = true;

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);

		info->perf_single_step = false;
	}

	if (!found)
		return NOTIFY_DONE;

	for (int i = 0; i < nr_wp_slots(); i++) {
		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);

		if (!bp)
			continue;

		__set_breakpoint(i, counter_arch_bp(bp));
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}