// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;		/* no instruction breakpoints available */
}

static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
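 *
 * Returns 0 on success, or -EBUSY when no free debug register is left.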
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};

static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static LIST_HEAD(task_bps);

static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
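	/* Remember whether ptrace or perf requested this breakpoint. */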
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}

static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}

static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	list_add(&tmp->list, &task_bps);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
}

/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
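 * Two breakpoints conflict only when they belong to different
 * infrastructures (ptrace vs. perf) and their address ranges overlap.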
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i;

	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
			return true;
	}
	return false;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}

/*
 * We don't use any locks to serialize accesses to cpu_bps or task_bps
 * because we are already inside nr_bp_mutex.
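 *
 * A ptrace breakpoint is refused if it overlaps a perf breakpoint bound to
 * any CPU or to the same task; perf breakpoints are checked against existing
 * ptrace breakpoints in the same way.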
 */
int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}

void arch_release_bp_slot(struct perf_event *bp)
{
	if (!is_kernel_addr(bp->attr.bp_addr)) {
		if (bp->hw.target)
			task_bps_remove(bp);
		if (bp->cpu != -1)
			cpu_bps_remove(bp);
	}
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then clean up the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
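	 * A task pointer of -1L is the perf core's tombstone for an exited
	 * task, so that case is skipped here.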
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length such that the next doubleword also gets
 * covered. Ex,
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *        <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
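 * The programmed length (hw_len) is the distance between these two aligned
 * boundaries.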
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross a 512-byte boundary */
		if (ALIGN(start_addr, SZ_512) != ALIGN(end_addr - 1, SZ_512))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can setup a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
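 * If @tsk has any deferred (last_hit_ubp) breakpoints, MSR_SE is cleared
 * and all breakpoints are re-installed from bp_per_reg.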
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	return;

reset:
	regs->msr &= ~MSR_SE;
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}
}

static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	return ((info->address <= dar) && (dar - info->address < info->len));
}

static bool dar_user_range_overlaps(unsigned long dar, int size,
				    struct arch_hw_breakpoint *info)
{
	return ((dar < info->address + info->len) &&
		(dar + size > info->address));
}

static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}

static bool dar_hw_range_overlaps(unsigned long dar, int size,
				  struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((dar < hw_end_addr) && (dar + size > hw_start_addr));
}

/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
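 * The constraint bits encode the requested access type (load/store) and
 * privilege level (user/kernel), which must match the interrupted context.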
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
				    struct arch_hw_breakpoint *info)
{
	if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
		return false;

	if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE))
		return false;

	if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
		return false;

	if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
		return false;

	return true;
}

/*
 * Return true if the event is valid wrt dawr configuration,
 * including extraneous exception. Otherwise return false.
 */
static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			      int type, int size, struct arch_hw_breakpoint *info)
{
	bool in_user_range = dar_in_user_range(regs->dar, info);
	bool dawrx_constraints;

	/*
	 * 8xx supports only one breakpoint and thus we can
	 * unconditionally return true.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		if (!in_user_range)
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		return true;
	}

	if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
		if (in_user_range)
			return true;

		if (dar_in_hw_range(regs->dar, info)) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
		return false;
	}

	dawrx_constraints = check_dawrx_constraints(regs, type, info);

	if (dar_user_range_overlaps(regs->dar, size, info))
		return dawrx_constraints;

	if (dar_hw_range_overlaps(regs->dar, size, info)) {
		if (dawrx_constraints) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
	}
	return false;
}

static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
			     int *type, int *size, bool *larx_stcx)
{
	struct instruction_op op;

	if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
		return;

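	/* Decode the faulting instruction to find its access type and size. */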
	analyse_instr(&op, regs, *instr);

	/*
	 * Set size = 8 if analyse_instr() fails. If it's a userspace
	 * watchpoint (valid or extraneous), we can notify the user about it.
	 * If it's a kernel watchpoint, instruction emulation will fail
	 * in stepping_handler() and the watchpoint will be disabled.
	 */
	*type = GETTYPE(op.type);
	*size = !(*type == UNKNOWN) ? GETSIZE(op.type) : 8;
	*larx_stcx = (*type == LARX || *type == STCX);
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     info->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   info->address);
	perf_event_disable_inatomic(bp);
}

static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     struct arch_hw_breakpoint **info, int *hit,
			     struct ppc_inst instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			info[i] = NULL;
		}
		regs->msr |= MSR_SE;
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i], info[i]);
			info[i] = NULL;
		}
		return false;
	}
	return true;
}

int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	struct ppc_inst instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	bool larx_stcx = false;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		get_instr_detail(regs, &instr, &type, &size, &larx_stcx);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (check_constraints(regs, instr, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = 1;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
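	 * Breakpoints that were hit are left with info[] cleared so they are
	 * not re-installed on the way out.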
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (larx_stcx) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
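 * Deferred user-space breakpoints recorded in last_hit_ubp are reported to
 * perf and re-armed here.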
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
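 * Dispatches DABR-match and single-step die notifications to the handlers
 * above.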
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}