// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;	/* no instruction breakpoints available */
}

static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}
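/*
 * For illustration, a minimal sketch of how a breakpoint reaches
 * arch_install_hw_breakpoint(): a kernel-side user fills a perf_event_attr
 * and registers it through the generic hw_breakpoint API, and the perf core
 * then calls back into this file on each CPU. The names "example_handler"
 * and "example_addr" below are placeholders, not part of this file.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)example_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
 *
 *	wp = register_wide_hw_breakpoint(&attr, example_handler, NULL);
 *	if (IS_ERR((void __force *)wp))
 *		pr_warn("could not register watchpoint\n");
 */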
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};

static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static LIST_HEAD(task_bps);

static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}

static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}

static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	list_add(&tmp->list, &task_bps);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
}

/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i;

	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
			return true;
	}
	return false;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}

/*
 * We don't use any locks to serialize accesses to cpu_bps or task_bps
 * because we are already inside nr_bp_mutex.
 */
int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}

void arch_release_bp_slot(struct perf_event *bp)
{
	if (!is_kernel_addr(bp->attr.bp_addr)) {
		if (bp->hw.target)
			task_bps_remove(bp);
		if (bp->cpu != -1)
			cpu_bps_remove(bp);
	}
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}
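/*
 * A worked example of the reservation policy above (the addresses are only
 * illustrative): can_co_exist() aligns both ranges to HW_BREAKPOINT_SIZE
 * (one doubleword), so a perf per-task watchpoint on 0x1000-0x1007 and a
 * later ptrace watchpoint on 0x1004 for the same task resolve to the same
 * doubleword and come from alternate infrastructures; same_task_bps_check()
 * therefore fails the second request and arch_reserve_bp_slot() returns
 * -ENOSPC.
 */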
/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range is crossing a doubleword boundary, we
 * need to increase the length such that the next doubleword also gets
 * covered. Ex,
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *    <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross a 512-byte boundary on P10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can setup a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}
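/*
 * For example (illustrative values): with the DAWR enabled on a pre-P10 CPU,
 * attr.bp_addr = 0x11f8 and attr.bp_len = 16 give start_addr = 0x11f8 and
 * end_addr = 0x1208, so the monitored region straddles the 512-byte boundary
 * at 0x1200 and hw_breakpoint_validate_len() rejects it with -EINVAL. The
 * same request with attr.bp_addr = 0x1200 fits entirely within one 512-byte
 * region and is accepted with hw_len = 16.
 */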
/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	return;

reset:
	regs->msr &= ~MSR_SE;
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}
}

static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	return ((info->address <= dar) && (dar - info->address < info->len));
}

static bool ea_user_range_overlaps(unsigned long ea, int size,
				   struct arch_hw_breakpoint *info)
{
	return ((ea < info->address + info->len) &&
		(ea + size > info->address));
}

static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}

static bool ea_hw_range_overlaps(unsigned long ea, int size,
				 struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
}

/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
 * If type is UNKNOWN, but privilege level matches, consider it as
 * a positive match.
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
				    struct arch_hw_breakpoint *info)
{
	if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
		return false;

	/*
	 * The Cache Management instructions other than dcbz never
	 * cause a match. i.e. if type is CACHEOP, the instruction
	 * is dcbz, and dcbz is treated as Store.
	 */
	if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
		return false;

	if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
		return false;

	if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
		return false;

	return true;
}
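/*
 * An illustrative case of an "extraneous" exception (addresses made up):
 * DABR/DAWR match on doubleword granularity, so with info->address = 0x1005
 * and info->len = 2, a 4-byte store to 0x1000 still raises the exception
 * because it touches the same doubleword. check_constraints() below detects
 * that the access is outside the user-requested range, flags the event with
 * HW_BRK_TYPE_EXTRANEOUS_IRQ, and hw_breakpoint_handler() then skips the
 * perf callback for it.
 */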
/*
 * Return true if the event is valid wrt dawr configuration,
 * including extraneous exception. Otherwise return false.
 */
static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			      unsigned long ea, int type, int size,
			      struct arch_hw_breakpoint *info)
{
	bool in_user_range = dar_in_user_range(regs->dar, info);
	bool dawrx_constraints;

	/*
	 * 8xx supports only one breakpoint and thus we can
	 * unconditionally return true.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		if (!in_user_range)
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		return true;
	}

	if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return true;
	}

	dawrx_constraints = check_dawrx_constraints(regs, type, info);

	if (type == UNKNOWN) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return dawrx_constraints;
	}

	if (ea_user_range_overlaps(ea, size, info))
		return dawrx_constraints;

	if (ea_hw_range_overlaps(ea, size, info)) {
		if (dawrx_constraints) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
	}
	return false;
}

static int cache_op_size(void)
{
#ifdef __powerpc64__
	return ppc64_caches.l1d.block_size;
#else
	return L1_CACHE_BYTES;
#endif
}

static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
			     int *type, int *size, unsigned long *ea)
{
	struct instruction_op op;

	if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
		return;

	analyse_instr(&op, regs, *instr);
	*type = GETTYPE(op.type);
	*ea = op.ea;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		*ea &= 0xffffffffUL;
#endif

	*size = GETSIZE(op.type);
	if (*type == CACHEOP) {
		*size = cache_op_size();
		*ea &= ~(*size - 1);
	}
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     info->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   info->address);
	perf_event_disable_inatomic(bp);
}
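/*
 * Emulate or single-step the instruction that hit the breakpoint so that
 * execution can make progress past the watched access. User-space
 * instructions are not emulated: the breakpoint is stashed in
 * last_hit_ubp[] and MSR_SE is set so single_step_dabr_instruction() can
 * finish the job. Kernel instructions are emulated in place; if emulation
 * fails, the offending breakpoints are disabled via handler_error().
 * Returns true if the caller should continue with the normal
 * 'trigger-after-execute' callbacks, false otherwise.
 */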
static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     struct arch_hw_breakpoint **info, int *hit,
			     struct ppc_inst instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			info[i] = NULL;
		}
		regs->msr |= MSR_SE;
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i], info[i]);
			info[i] = NULL;
		}
		return false;
	}
	return true;
}

int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	struct ppc_inst instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (check_constraints(regs, instr, ea, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = 1;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Return early after invoking the user-callback function without
	 * restoring the DABR if the breakpoint is from ptrace, which always
	 * operates in one-shot mode. The ptrace-ed process will receive the
	 * SIGTRAP signal generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
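/*
 * For reference, a rough sketch of the per-task path that ends up in
 * ptrace_triggered() above. The powerpc ptrace code builds a perf_event_attr
 * for the traced task and registers it with ptrace_triggered as the overflow
 * handler; the values and the names "addr" and "child" below are
 * placeholders:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;			// address supplied by the tracer
 *	attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child);
 *	if (!IS_ERR(bp))
 *		child->thread.ptrace_bps[0] = bp;
 *
 * The one-shot behaviour then comes from ptrace_triggered() disabling the
 * event via modify_user_hw_breakpoint().
 */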