/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

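/*
 * The UBC backend currently in use. This points at the dummy above until
 * a CPU-specific implementation registers itself via register_sh_ubc().
 */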
static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

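/*
 * Convert an arch-specific breakpoint length encoding to a length in bytes.
 */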
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

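/*
 * Translate the arch-specific length/type encodings into the generic
 * hw_breakpoint ones used by the perf layer.
 */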
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

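/*
 * Translate the generic perf attributes into the arch-specific breakpoint
 * description (address, length and access type).
 */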
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel-addresses, either the address or symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	return 0;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

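/*
 * Handle a UBC debug exception: walk the triggered channels, hand each hit
 * off to the perf layer and notify userspace where appropriate.
 */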
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released, but that can
		 * only occur from a call_rcu() path. We can then safely
		 * fetch the breakpoint, use its callback and touch its
		 * counter while we are inside an rcu_read_lock() section.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

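/*
 * Low-level breakpoint trap entry; simply forwards the event to the die
 * notifier chain.
 */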
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

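/*
 * Called by the CPU-specific UBC support code to register itself as the
 * active backend.
 */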
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}