// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;	/* no instruction breakpoints available */
}

static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}
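
/*
 * Illustrative sketch (not part of this file): a kernel-side user of this
 * facility typically goes through the generic register_wide_hw_breakpoint()
 * API, which reserves one slot per CPU and ends up in
 * arch_install_hw_breakpoint() above. The handler and variable names below
 * (sample_hbp_handler, watched_var) are hypothetical:
 *
 *	static void sample_hbp_handler(struct perf_event *bp,
 *				       struct perf_sample_data *data,
 *				       struct pt_regs *regs)
 *	{
 *		pr_info("watched_var was written to\n");
 *	}
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched_var;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	wp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL);
 */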

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};

/*
 * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
 * rely on it safely synchronizing internals here; however, we can rely on it
 * not requesting more breakpoints than available.
 */
static DEFINE_SPINLOCK(cpu_bps_lock);
static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static DEFINE_SPINLOCK(task_bps_lock);
static LIST_HEAD(task_bps);

static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}

static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}
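
/*
 * Worked example of the coexistence rule above, assuming HW_BREAKPOINT_SIZE
 * is 8: a ptrace breakpoint at 0x1002 with length 4 is widened to the
 * doubleword range 0x1000-0x1008, and a perf breakpoint on the same task at
 * 0x1006 with length 8 is widened to 0x1000-0x1010. The widened ranges
 * overlap and the two requests come from alternate infrastructures, so
 * can_co_exist() returns false and the later request is refused with
 * -ENOSPC in arch_reserve_bp_slot() below.
 */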

static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	spin_lock(&task_bps_lock);
	list_add(&tmp->list, &task_bps);
	spin_unlock(&task_bps_lock);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	spin_lock(&task_bps_lock);
	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
	spin_unlock(&task_bps_lock);
}

/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;
	bool ret = false;

	spin_lock(&task_bps_lock);
	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&task_bps_lock);
	return ret;
}

/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;
	bool ret = false;

	spin_lock(&task_bps_lock);
	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&task_bps_lock);
	return ret;
}

static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	bool ret = false;
	int i;

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
	return ret;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}

int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}

void arch_release_bp_slot(struct perf_event *bp)
{
	if (!is_kernel_addr(bp->attr.bp_addr)) {
		if (bp->hw.target)
			task_bps_remove(bp);
		if (bp->cpu != -1)
			cpu_bps_remove(bp);
	}
}
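
/*
 * Illustrative sketch (hypothetical variable names): the "ptrace breakpoint"
 * branch above is taken for per-task breakpoints whose overflow handler is
 * ptrace_triggered(), which is roughly how the ptrace code creates them
 * through the generic API:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 *
 * A failure here would typically be -ENOSPC propagated from
 * arch_reserve_bp_slot() above.
 */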

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * The watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we need to
 * increase the length so that the next doubleword also gets covered. For
 * example:
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *    <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross a 512-byte boundary on P10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can setup a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}
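
/*
 * Worked example of the length validation above, assuming dawr_enabled()
 * on a CPU without CPU_FTR_ARCH_31: a request with bp_addr == 0x1f8 and
 * bp_len == 16 is rounded to the doubleword range 0x1f8-0x208, which
 * straddles the 512-byte boundary at 0x200, so hw_breakpoint_validate_len()
 * returns -EINVAL. The same request at bp_addr == 0x200 rounds to
 * 0x200-0x210 and is accepted with hw_len == 16.
 */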

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 *
 * The perf watchpoint will simply re-trigger once the thread is started again,
 * and the watchpoint handler will set up MSR_SE and perf_single_step as
 * needed.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	return;

reset:
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

static bool is_octword_vsx_instr(int type, int size)
{
	return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     counter_arch_bp(bp)->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   counter_arch_bp(bp)->address);
	perf_event_disable_inatomic(bp);
}

static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     int *hit, ppc_inst_t instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			bp[i] = NULL;
		}
		regs_set_return_msr(regs, regs->msr | MSR_SE);
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i]);
			bp[i] = NULL;
		}
		return false;
	}
	return true;
}

static void handle_p10dd1_spurious_exception(struct perf_event **bp,
					     int *hit, unsigned long ea)
{
	int i;
	unsigned long hw_end_addr;

	/*
	 * Handle spurious exception only when any bp_per_reg is set.
	 * Otherwise this might be created by xmon and not actually a
	 * spurious exception.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		struct arch_hw_breakpoint *info;

		if (!bp[i])
			continue;

		info = counter_arch_bp(bp[i]);

		hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

		/*
		 * Ending address of DAWR range is less than starting
		 * address of op.
		 */
		if ((hw_end_addr - 1) >= ea)
			continue;

		/*
		 * Those addresses need to be in the same or in two
		 * consecutive 512B blocks.
		 */
		if (((hw_end_addr - 1) >> 10) != (ea >> 10))
			continue;

		/*
		 * 'op address + 64B' generates an address that has a
		 * carry into bit 52 (crosses 2K boundary).
		 */
		if ((ea & 0x800) == ((ea + 64) & 0x800))
			continue;

		break;
	}

	if (i == nr_wp_slots())
		return;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (bp[i]) {
			hit[i] = 1;
			counter_arch_bp(bp[i])->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		}
	}
}

int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea = 0;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		struct arch_hw_breakpoint *info;

		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info = counter_arch_bp(bp[i]);
		info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i]);
				bp[i] = NULL;
				err = true;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		/* Workaround for Power10 DD1 */
		if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
		    is_octword_vsx_instr(type, size)) {
			handle_p10dd1_spurious_exception(bp, hit, ea);
		} else {
			rc = NOTIFY_DONE;
			goto out;
		}
	}

	/*
	 * Return early after invoking the user-callback function without
	 * restoring the DABR if the breakpoint is from ptrace, which always
	 * operates in one-shot mode. The ptrace-ed process will receive the
	 * SIGTRAP signal generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			bp[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i]);
				bp[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(counter_arch_bp(bp[i])->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!bp[i])
			continue;
		__set_breakpoint(i, counter_arch_bp(bp[i]));
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
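
/*
 * Illustrative user-space sketch (hypothetical tracer code) of the ptrace
 * interface that ends up in ptrace_triggered() above. The tracee stops with
 * a SIGTRAP on the first hit, after which the breakpoint stays disabled
 * because of the one-shot policy:
 *
 *	struct ppc_hw_breakpoint brk = {
 *		.version	= PPC_DEBUG_CURRENT_VERSION,
 *		.trigger_type	= PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode	= PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode	= PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr		= (__u64)watched_addr,
 *	};
 *	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &brk);
 */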