xref: /openbmc/linux/arch/x86/kernel/hw_breakpoint.c (revision 2c31b795)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) 2009 IBM Corporation
 * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>

/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
EXPORT_PER_CPU_SYMBOL(cpu_dr7);

/* Per cpu debug address register values */
static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);


static inline unsigned long
__encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	unsigned long bp_info;

	bp_info = (len | type) & 0xf;
	bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
	bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));

	return bp_info;
}

/*
 * Encode the length, type, Exact, and Enable bits for a particular breakpoint
 * as stored in debug register 7.
 */
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
}
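
/*
 * Editor's illustration (not part of the original file): a worked example of
 * the DR7 encoding above, assuming the LEN/RW encodings used elsewhere in
 * this file (X86_BREAKPOINT_LEN_4 = 0x4c, X86_BREAKPOINT_WRITE = 0x81) and
 * the standard DR7 bit layout from <asm/debugreg.h>:
 *
 *	encode_dr7(0, X86_BREAKPOINT_LEN_4, X86_BREAKPOINT_WRITE)
 *	  = ((0x4c | 0x81) & 0xf) << DR_CONTROL_SHIFT   LEN0/RW0 = 0xd  -> 0x000d0000
 *	  | DR_GLOBAL_ENABLE << (0 * DR_ENABLE_SIZE)    G0 bit          -> 0x00000002
 *	  | DR_GLOBAL_SLOWDOWN                          GE bit          -> 0x00000200
 *	  = 0x000d0202
 *
 * i.e. DR0 is armed as a globally-enabled 4-byte write breakpoint.
 */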

/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7.  Return the "enabled" status.
 */
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
{
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;

	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
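
/*
 * Editor's illustration (not part of the original file): decoding the value
 * built in the example above round-trips back to the arch encodings:
 *
 *	decode_dr7(0x000d0202, 0, &len, &type)
 *	  sets len  = (0xd & 0xc) | 0x40 = 0x4c   (X86_BREAKPOINT_LEN_4)
 *	  sets type = (0xd & 0x3) | 0x80 = 0x81   (X86_BREAKPOINT_WRITE)
 *	  returns 0x2, i.e. slot 0 is globally enabled.
 */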

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint. Then we enable it in the debug control register (DR7).
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return -EBUSY;

	set_debugreg(info->address, i);
	__get_cpu_var(cpu_debugreg[i]) = info->address;

	dr7 = &__get_cpu_var(cpu_dr7);
	*dr7 |= encode_dr7(i, info->len, info->type);

	set_debugreg(*dr7, 7);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return;

	dr7 = &__get_cpu_var(cpu_dr7);
	*dr7 &= ~__encode_dr7(i, info->len, info->type);

	set_debugreg(*dr7, 7);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case X86_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case X86_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case X86_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
#endif
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va <= TASK_SIZE - len);
}

/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
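
/*
 * Editor's illustration (not part of the original file): the user-space
 * check takes the breakpoint length into account. With a hypothetical
 * TASK_SIZE of 0xc0000000, a 4-byte breakpoint at va = 0xbffffffd is
 * rejected because va > TASK_SIZE - 4, i.e. the watched bytes would
 * straddle the user/kernel boundary.
 */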

/*
 * Store a breakpoint's encoded address, length, and type.
 */
static int arch_store_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	/*
	 * For kernel addresses, either the address or the symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)
				kallsyms_lookup_name(info->name);
	if (info->address)
		return 0;

	return -EINVAL;
}

int arch_bp_generic_fields(int x86_len, int x86_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (x86_len) {
	case X86_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case X86_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case X86_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -EINVAL;
	}

	/* Type */
	switch (x86_type) {
	case X86_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case X86_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case X86_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
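
/*
 * Editor's illustration (not part of the original file): converting the arch
 * encodings back to the generic hw_breakpoint flags, e.g.
 *
 *	int gen_len, gen_type;
 *
 *	arch_bp_generic_fields(X86_BREAKPOINT_LEN_4, X86_BREAKPOINT_WRITE,
 *			       &gen_len, &gen_type);
 *	// gen_len == HW_BREAKPOINT_LEN_4, gen_type == HW_BREAKPOINT_W
 *
 * Any length or type outside the switches above yields -EINVAL.
 */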

static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = X86_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = X86_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case HW_BREAKPOINT_LEN_8:
		info->len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_W:
		info->type = X86_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = X86_BREAKPOINT_RW;
		break;
	case HW_BREAKPOINT_X:
		info->type = X86_BREAKPOINT_EXECUTE;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
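
/*
 * Editor's illustration (not part of the original file): for a perf_event
 * whose attr carries bp_addr = 0x1000, bp_len = HW_BREAKPOINT_LEN_4 and
 * bp_type = HW_BREAKPOINT_W, arch_build_bp_info() fills in
 *
 *	info->address = 0x1000;
 *	info->len     = X86_BREAKPOINT_LEN_4;
 *	info->type    = X86_BREAKPOINT_WRITE;
 *
 * which encode_dr7() above then turns into the DR7 control bits.
 */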

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (info->type == X86_BREAKPOINT_EXECUTE)
		/*
		 * Ptrace-refactoring code
		 * For now, we'll allow instruction breakpoints only for
		 * user-space addresses
		 */
		if ((!arch_check_va_in_userspace(info->address, info->len)) &&
			info->len != X86_BREAKPOINT_EXECUTE)
			return ret;

	switch (info->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case X86_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case X86_BREAKPOINT_LEN_4:
		align = 3;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		align = 7;
		break;
#endif
	default:
		return ret;
	}

	if (bp->callback)
		ret = arch_store_info(bp);

	if (ret < 0)
		return ret;
	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
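
/*
 * Editor's illustration (not part of the original file): the alignment mask
 * above means a breakpoint address must be naturally aligned to its length.
 * For example, address 0x1002 with X86_BREAKPOINT_LEN_4 is rejected because
 * 0x1002 & 3 != 0, while 0x1004 passes and watches bytes 0x1004-0x1007.
 */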

/*
 * Dump the debug register contents to the user.
 * We can't dump our per-cpu values because they may contain
 * cpu-wide breakpoints, which don't belong to the current task.
 *
 * TODO: include non-ptrace user breakpoints (perf)
 */
void aout_dump_debugregs(struct user *dump)
{
	int i;
	int dr7 = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &current->thread;

	for (i = 0; i < HBP_NUM; i++) {
		bp = thread->ptrace_bps[i];

		if (bp && !bp->attr.disabled) {
			dump->u_debugreg[i] = bp->attr.bp_addr;
			info = counter_arch_bp(bp);
			dr7 |= encode_dr7(i, info->len, info->type);
		} else {
			dump->u_debugreg[i] = 0;
		}
	}

	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;

	dump->u_debugreg[7] = dr7;
}
EXPORT_SYMBOL_GPL(aout_dump_debugregs);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < HBP_NUM; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_restore(void)
{
	set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
	set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
	set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
	set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
	set_debugreg(current->thread.debugreg6, 6);
	set_debugreg(__get_cpu_var(cpu_dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
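
/*
 * Editor's note (not part of the original file): hw_breakpoint_restore()
 * reloads DR0-DR3 from the per-cpu cpu_debugreg[] copies, DR6 from the
 * current thread's virtualized value and DR7 from cpu_dr7. It is exported
 * (GPL) so that code which clobbers the hardware debug registers can put
 * the breakpoint state back; the callers live outside this file and are
 * not shown here.
 */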

/*
 * Handle debug exception notifications.
 *
 * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
 *
 * NOTIFY_DONE is returned if one of the following conditions is true:
 * i) When the causative address is from user-space and the exception
 * is a valid one, i.e. not triggered as a result of lazy debug register
 * switching
 * ii) When bits other than the trap<n> bits are set in the DR6 register
 * (such as BD, BS or BT), indicating that more than one debug condition
 * is met and requires further handling in do_debug().
 *
 * NOTIFY_STOP is returned for all other cases.
 */
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int i, cpu, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long dr7, dr6;
	unsigned long *dr6_p;

	/* The DR6 value is pointed to by args->err */
	dr6_p = (unsigned long *)ERR_PTR(args->err);
	dr6 = *dr6_p;

	/* Do an early return if no trap bits are set in DR6 */
	if ((dr6 & DR_TRAP_BITS) == 0)
		return NOTIFY_DONE;

	get_debugreg(dr7, 7);
	/* Disable breakpoints during exception handling */
	set_debugreg(0UL, 7);
	/*
	 * Assert that local interrupts are disabled.
	 * Reset the DRn bits in the virtualized register value.
	 * The ptrace trigger routine will add in whatever is needed.
	 */
	current->thread.debugreg6 &= ~DR_TRAP_BITS;
	cpu = get_cpu();

	/* Handle all the breakpoints that were triggered */
	for (i = 0; i < HBP_NUM; ++i) {
		if (likely(!(dr6 & (DR_TRAP0 << i))))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can therefore safely fetch
		 * the breakpoint, use its callback and touch its counter
		 * while we are inside an rcu_read_lock() section.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;
		/*
		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
		 * exception handling
		 */
		(*dr6_p) &= ~(DR_TRAP0 << i);
		/*
		 * bp can be NULL due to lazy debug register switching
		 * or due to concurrent perf counter removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		(bp->callback)(bp, args->regs);

		rcu_read_unlock();
	}
	if (dr6 & (~DR_TRAP_BITS))
		rc = NOTIFY_DONE;

	set_debugreg(dr7, 7);
	put_cpu();

	return rc;
}
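
/*
 * Editor's illustration (not part of the original file): if only the DR1
 * condition fired, DR6 has DR_TRAP1 set, so the loop above handles i == 1
 * only: it clears that trap bit in the DR6 copy pointed to by args->err
 * and, if a breakpoint is installed in that slot, invokes its callback and
 * sets rc to NOTIFY_DONE. Extra DR6 bits outside DR_TRAP_BITS (such as BS)
 * also force NOTIFY_DONE so that do_debug() can finish the job.
 */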

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	if (val != DIE_DEBUG)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}
556