/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 *
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return HBP_NUM;
	return 0;		/* no instruction breakpoints available */
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

	*slot = bp;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (current->thread.last_hit_ubp != bp)
		__set_breakpoint(info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

	if (*slot != bp) {
		WARN_ONCE(1, "Can't find the breakpoint");
		return;
	}

	*slot = NULL;
	hw_breakpoint_disable();
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
		bp->ctx->task->thread.last_hit_ubp = NULL;
}
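
/*
 * Illustrative sketch (not part of the original file): the arch hooks above
 * are driven by the generic hw_breakpoint/perf layer. A kernel client would
 * typically reach them through register_wide_hw_breakpoint(), roughly as in
 * samples/hw_breakpoint/data_breakpoint.c; the names wp_handler and
 * some_kernel_variable below are example placeholders only.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&some_kernel_variable;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 *
 * Each online CPU then ends up in arch_install_hw_breakpoint(), which fills
 * the per-cpu bp_per_reg slot and programs the DABR/DAWR.
 */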

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	return is_kernel_addr(info->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	int ret = -EINVAL, length_max;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	if (!bp)
		return ret;

	info->type = HW_BRK_TYPE_TRANSLATE;
	if (bp->attr.bp_type & HW_BREAKPOINT_R)
		info->type |= HW_BRK_TYPE_READ;
	if (bp->attr.bp_type & HW_BREAKPOINT_W)
		info->type |= HW_BRK_TYPE_WRITE;
	if (info->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!(bp->attr.exclude_user))
		info->type |= HW_BRK_TYPE_USER;
	if (!(bp->attr.exclude_kernel))
		info->type |= HW_BRK_TYPE_KERNEL;
	if (!(bp->attr.exclude_hv))
		info->type |= HW_BRK_TYPE_HYP;
	info->address = bp->attr.bp_addr;
	info->len = bp->attr.bp_len;

	/*
	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
	 * and breakpoint addresses are aligned to the nearest double-word
	 * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
	 * 'symbolsize' should satisfy the check below.
	 */
	length_max = 8; /* DABR */
	if (cpu_has_feature(CPU_FTR_DAWR)) {
		length_max = 512; /* 64 doublewords */
		/* DAWR region can't cross a 512-byte boundary */
		if ((bp->attr.bp_addr >> 10) !=
		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
			return -EINVAL;
	}
	if (info->len >
	    (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
		return -EINVAL;
	return 0;
}
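
/*
 * Worked example for the check above (illustrative, not from the original
 * source), assuming HW_BREAKPOINT_ALIGN is the doubleword mask 0x7: with a
 * plain DABR, length_max is 8, so a request with bp_addr = 0x1006 and
 * bp_len = 4 is rejected because 4 > 8 - (0x1006 & 0x7) = 8 - 6 = 2; the
 * watched range would spill past the doubleword the DABR can cover.
 */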

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;

	if (likely(!tsk->thread.last_hit_ubp))
		return;

	info = counter_arch_bp(tsk->thread.last_hit_ubp);
	regs->msr &= ~MSR_SE;
	__set_breakpoint(info);
	tsk->thread.last_hit_ubp = NULL;
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	int stepped = 1;
	struct arch_hw_breakpoint *info;
	unsigned int instr;
	unsigned long dar = regs->dar;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __this_cpu_read(bp_per_reg);
	if (!bp)
		goto out;
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking the user-callback function without
	 * restoring DABR if the breakpoint is from ptrace, which always
	 * operates in one-shot mode. The ptrace-ed process will receive the
	 * SIGTRAP signal generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Verify if dar lies within the address range occupied by the symbol
	 * being watched to filter extraneous exceptions. If it doesn't,
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
	if (!((bp->attr.bp_addr <= dar) &&
	      (dar - bp->attr.bp_addr < bp->attr.bp_len)))
		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		current->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		goto out;
	}

	stepped = 0;
	instr = 0;
	if (!__get_user_inatomic(instr, (unsigned int *)regs->nip))
		stepped = emulate_step(regs, instr);

	/*
	 * emulate_step() could not execute it. We've failed in reliably
	 * handling the hw-breakpoint. Disable it and throw a warning
	 * message to let the user know about it.
	 */
	if (!stepped) {
		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
			"0x%lx will be disabled.", info->address);
		perf_event_disable(bp);
		goto out;
	}
	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	__set_breakpoint(info);
out:
	rcu_read_unlock();
	return rc;
}

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int __kprobes single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;

	bp = current->thread.last_hit_ubp;
	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	if (!bp)
		return NOTIFY_DONE;

	info = counter_arch_bp(bp);

	/*
	 * We shall invoke the user-defined callback function in the single
	 * stepping handler to conform to 'trigger-after-execute' semantics
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	__set_breakpoint(info);
	current->thread.last_hit_ubp = NULL;

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
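
/*
 * Summary of the sequence implemented by the two handlers above: a DABR/DAWR
 * match lands in hw_breakpoint_handler(), which disables the breakpoint and
 * either emulates the access (kernel addresses) or arms MSR_SE and records
 * last_hit_ubp (user addresses). The subsequent single-step trap then reaches
 * single_step_dabr_instruction(), which delivers the perf event and re-arms
 * the breakpoint, giving the 'trigger-after-execute' semantics noted above.
 */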

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	unregister_hw_breakpoint(t->ptrace_bps[0]);
	t->ptrace_bps[0] = NULL;
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
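
/*
 * Illustrative sketch (not part of the original file): from user space the
 * same facility is reached through perf_event_open() with a breakpoint
 * attribute; watched_var and the field values below are example placeholders.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_BREAKPOINT,
 *		.size		= sizeof(attr),
 *		.bp_type	= HW_BREAKPOINT_W,
 *		.bp_addr	= (__u64)(unsigned long)&watched_var,
 *		.bp_len		= HW_BREAKPOINT_LEN_8,
 *		.sample_period	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * A write to watched_var then raises the DABR/DAWR match that is handled in
 * hw_breakpoint_handler() above.
 */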