/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 *
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return HBP_NUM;
	return 0;		/* no instruction breakpoints available */
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot = &__get_cpu_var(bp_per_reg);

	*slot = bp;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (current->thread.last_hit_ubp != bp)
		__set_breakpoint(info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct perf_event **slot = &__get_cpu_var(bp_per_reg);

	if (*slot != bp) {
		WARN_ONCE(1, "Can't find the breakpoint");
		return;
	}

	*slot = NULL;
	hw_breakpoint_disable();
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 */
	if (bp->ctx && bp->ctx->task)
		bp->ctx->task->thread.last_hit_ubp = NULL;
}
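
/*
 * Illustrative sketch (not part of the original file): the install/uninstall
 * hooks above are not called directly.  A kernel client would normally go
 * through the generic API in <linux/hw_breakpoint.h>, roughly as below, and
 * the perf core then ends up calling arch_install_hw_breakpoint() when the
 * event is scheduled in on each CPU.  The symbol name and handler here are
 * made-up placeholders.
 *
 *	static void example_wp_handler(struct perf_event *bp,
 *				       struct perf_sample_data *data,
 *				       struct pt_regs *regs)
 *	{
 *		pr_info("watched data was touched\n");
 *	}
 *
 *	static struct perf_event * __percpu *example_wp;
 *
 *	static int __init example_wp_init(void)
 *	{
 *		struct perf_event_attr attr;
 *
 *		hw_breakpoint_init(&attr);
 *		attr.bp_addr = kallsyms_lookup_name("example_symbol");
 *		attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *		attr.bp_type = HW_BREAKPOINT_W;
 *
 *		example_wp = register_wide_hw_breakpoint(&attr,
 *							 example_wp_handler,
 *							 NULL);
 *		return IS_ERR(example_wp) ? PTR_ERR(example_wp) : 0;
 *	}
 */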

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	return is_kernel_addr(info->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	int ret = -EINVAL, length_max;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	if (!bp)
		return ret;

	info->type = HW_BRK_TYPE_TRANSLATE;
	if (bp->attr.bp_type & HW_BREAKPOINT_R)
		info->type |= HW_BRK_TYPE_READ;
	if (bp->attr.bp_type & HW_BREAKPOINT_W)
		info->type |= HW_BRK_TYPE_WRITE;
	if (info->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!(bp->attr.exclude_user))
		info->type |= HW_BRK_TYPE_USER;
	if (!(bp->attr.exclude_kernel))
		info->type |= HW_BRK_TYPE_KERNEL;
	if (!(bp->attr.exclude_hv))
		info->type |= HW_BRK_TYPE_HYP;
	info->address = bp->attr.bp_addr;
	info->len = bp->attr.bp_len;

	/*
	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN (8)
	 * and breakpoint addresses are aligned down to the nearest
	 * double-word (HW_BREAKPOINT_ALIGN), the requested watch length
	 * must satisfy the check below.
	 */
	length_max = 8; /* DABR */
	if (cpu_has_feature(CPU_FTR_DAWR)) {
		length_max = 512; /* 64 doublewords */
		/* DAWR region can't cross 512 boundary */
		if ((bp->attr.bp_addr >> 10) !=
		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
			return -EINVAL;
	}
	if (info->len >
	    (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
		return -EINVAL;
	return 0;
}
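
/*
 * Worked example of the length check above (illustrative, the address is
 * made up): with a plain DABR, length_max is 8 and the watched unit is the
 * aligned double-word containing bp_addr, so (address & HW_BREAKPOINT_ALIGN)
 * is the offset within that double-word.  A request with bp_addr ending in
 * ...4 and bp_len = 8 is rejected: the offset is 4, only 8 - 4 = 4 bytes
 * remain in the double-word, and 8 > 4 yields -EINVAL.  The same request
 * with bp_len = 4, or with bp_addr aligned down to the double-word, passes.
 */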

/*
 * Restore the breakpoint in the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change, causing loss of the MSR_SE setting.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;

	if (likely(!tsk->thread.last_hit_ubp))
		return;

	info = counter_arch_bp(tsk->thread.last_hit_ubp);
	regs->msr &= ~MSR_SE;
	__set_breakpoint(info);
	tsk->thread.last_hit_ubp = NULL;
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	int stepped = 1;
	struct arch_hw_breakpoint *info;
	unsigned int instr;
	unsigned long dar = regs->dar;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released, but that can only
	 * occur from a call_rcu() path, so we can safely fetch the
	 * breakpoint, use its callback and touch its counter while we
	 * are inside an rcu_read_lock() section.
	 */
	rcu_read_lock();

	bp = __get_cpu_var(bp_per_reg);
	if (!bp)
		goto out;
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking the user callback, without restoring
	 * the DABR, if the breakpoint came from ptrace, which always operates
	 * in one-shot mode.  The ptrace'd process will receive the SIGTRAP
	 * signal generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Verify if dar lies within the address range occupied by the symbol
	 * being watched to filter extraneous exceptions.  If it doesn't,
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
	if (!((bp->attr.bp_addr <= dar) &&
	      (dar - bp->attr.bp_addr < bp->attr.bp_len)))
		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		current->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		goto out;
	}

	stepped = 0;
	instr = 0;
	if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
		stepped = emulate_step(regs, instr);

	/*
	 * emulate_step() could not execute the instruction, so we have
	 * failed to reliably handle the hw-breakpoint.  Disable it and
	 * emit a warning to let the user know about it.
	 */
	if (!stepped) {
		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
			"0x%lx will be disabled.", info->address);
		perf_event_disable(bp);
		goto out;
	}
	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	__set_breakpoint(info);
out:
	rcu_read_unlock();
	return rc;
}

/*
 * Handle single-step exceptions following a DABR hit.
 */
int __kprobes single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;

	bp = current->thread.last_hit_ubp;
	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	if (!bp)
		return NOTIFY_DONE;

	info = counter_arch_bp(bp);

	/*
	 * We invoke the user-defined callback function from the single
	 * stepping handler to conform to 'trigger-after-execute' semantics
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	__set_breakpoint(info);
	current->thread.last_hit_ubp = NULL;

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	unregister_hw_breakpoint(t->ptrace_bps[0]);
	t->ptrace_bps[0] = NULL;
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
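
/*
 * Illustrative sketch (not part of the original file): a per-task breakpoint,
 * such as the ptrace one torn down by flush_ptrace_hw_breakpoint() above, is
 * expected to be created through register_user_hw_breakpoint() with
 * ptrace_triggered as the overflow handler, roughly as below.  The attribute
 * values are placeholders; the real setup lives in the powerpc ptrace code.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	ptrace_breakpoint_init(&attr);
 *	attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_RW;
 *
 *	bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
 *					 NULL, child);
 *	if (!IS_ERR(bp))
 *		child->thread.ptrace_bps[0] = bp;
 *
 * Because such a breakpoint is one-shot, hw_breakpoint_handler() above calls
 * perf_bp_event() and returns without re-arming the DABR/DAWR.
 */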