/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
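/*
 * Consumers drive this code through the generic hw_breakpoint/perf API in
 * <linux/hw_breakpoint.h> rather than calling it directly. A rough sketch
 * (the exact register_wide_hw_breakpoint() signature varies between kernel
 * versions; see samples/hw_breakpoint/data_breakpoint.c, and note that
 * wp_handler below is just a placeholder overflow handler):
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&some_symbol;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler);
 */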
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/system.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_reserved_brps;
static int core_num_wrps;

/* Debug architecture version. */
static u8 debug_arch;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;

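/*
 * The value/control registers are indexed by a single integer in
 * read_wb_reg()/write_wb_reg(): bits [7:4] hold the CP14 opcode 2 and
 * bits [3:0] the CRm number, i.e. (OP2 << 4) + CRm. The ARM_BASE_*
 * constants from <asm/hw_breakpoint.h> follow the same encoding, so
 * ARM_BASE_BVR + n names BVRn, ARM_BASE_WCR + n names WCRn, and so on.
 */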
#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL); \
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint "
				"register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint "
				"register %d\n", n);
	}
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warning("CPUID feature registers not supported. "
				"Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

/* Determine the number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * FIXME: When a watchpoint fires, the only way to work out which
	 * watchpoint it was is by disassembling the faulting instruction
	 * and working out the address of the memory access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */

#if 0
	int wrps;
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	wrps = ((didr >> 28) & 0xf) + 1;
#endif
	int wrps = 1;

	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
		wrps = get_num_brp_resources() - 1;

	return wrps;
}

/* We reserve one breakpoint for each watchpoint. */
static int get_num_reserved_brps(void)
{
	if (core_has_mismatch_brps())
		return get_num_wrps();
	return 0;
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	if (core_has_mismatch_brps())
		brps -= get_num_reserved_brps();
	return brps;
}
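
/*
 * Note on slot layout: the first core_num_brps BVR/BCR pairs back the
 * breakpoints visible to perf, while the remaining reserved pairs are only
 * ever used as mismatch breakpoints for stepping over watchpoints (see
 * arch_install_hw_breakpoint() and enable_single_step() below).
 */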

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	int ret = 0;

	ARM_DBG_READ(c1, 0, dscr);

	/* Ensure that halting mode is disabled. */
	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, "halting debug mode enabled. "
				"Unable to access hardware resources.")) {
		ret = -EPERM;
		goto out;
	}

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/*
	 * Write to the corresponding DSCR. Note that from v7 onwards the
	 * writable, extended view of the DSCR lives at c2, 2; the internal
	 * view at c1, 0 is read-only.
	 */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN))
		ret = -EPERM;

out:
	return ret;
}

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
		if (info->step_ctrl.enabled) {
			/* Override the breakpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		}
	} else {
		/* Watchpoint */
		if (info->step_ctrl.enabled) {
			/* Install into the reserved breakpoint region. */
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
			/* Override the watchpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		} else {
			ctrl_base = ARM_BASE_WCR;
			val_base = ARM_BASE_WVR;
		}
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

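	/*
	 * Claim the first free slot; the per-CPU slots array is what the
	 * debug exception handlers use to map a register index back to its
	 * perf_event.
	 */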
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) {
		ret = -EBUSY;
		goto out;
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		if (info->step_ctrl.enabled)
			base = ARM_BASE_BCR + core_num_brps;
		else
			base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
		return;

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
		/* Fall through: 8-byte lengths are only valid for watchpoints. */
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
		/* Fall through. */
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
		/* Fall through. */
	default:
		ret = -EINVAL;
		goto out;
	}

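	/*
	 * The length field is programmed into the hardware as a byte
	 * address select mask for the naturally-aligned word, so align the
	 * address down and shift the mask up by the offset. For example, a
	 * halfword breakpoint at a Thumb address ending in 0x2 becomes the
	 * word-aligned address with a byte address select of 0b1100.
	 */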
	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	/*
	 * Currently we rely on an overflow handler to take
	 * care of single-stepping the breakpoint when it fires.
	 * In the case of userspace breakpoints on a core with V7 debug,
	 * we can use the mismatch feature as a poor-man's hardware
	 * single-step, but this only works for per-task breakpoints.
	 */
	if (WARN_ONCE(!bp->overflow_handler &&
		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
		 || !bp->hw.bp_target),
			"overflow handler required but none found")) {
		ret = -EINVAL;
	}
out:
	return ret;
}

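/*
 * "Single-stepping" here is implemented with the mismatch feature: the
 * breakpoint (or, for a watchpoint, one of the reserved BRPs) is temporarily
 * reprogrammed to fire on any instruction address *other* than the trigger,
 * so we take another debug exception as soon as execution moves past the
 * offending instruction and can then restore the original breakpoint or
 * watchpoint.
 */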
/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch  = 1;
	info->step_ctrl.len	  = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type	  = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled	  = 1;
	info->trigger		  = addr;
	arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}

static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	/* Without a disassembler, we can only handle 1 watchpoint. */
	BUG_ON(core_num_wrps > 1);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL) {
			rcu_read_unlock();
			continue;
		}

		/*
		 * The DFAR is an unknown value. Since we only allow a
		 * single watchpoint, we can set the trigger to the lowest
		 * possible faulting address.
		 */
		info = counter_arch_bp(wp);
		info->trigger = wp->attr.bp_addr;
		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

		rcu_read_unlock();
	}
}

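/*
 * Called from the breakpoint handler so that, once the mismatch breakpoint
 * set up above has fired (i.e. the faulting access has been stepped over),
 * the original watchpoint can be restored.
 */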
static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_reserved_brps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}

static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/*
		 * Possible match: the BVR only holds the word-aligned
		 * address, so check the byte address select bits to confirm
		 * which bytes within the word are covered.
		 */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with preemption disabled.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	/* We must be called with preemption disabled. */
	WARN_ON(preemptible());

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
		/* Fall through */
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	/*
	 * Re-enable preemption after it was disabled in the
	 * low-level exception handling code.
	 */
	preempt_enable();

	return ret;
}

/*
 * One-time initialisation.
 */
static void reset_ctrl_regs(void *unused)
{
	int i;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving
	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
	 * we can write to the debug registers out of reset, so we must
	 * unlock the OS Lock Access Register to avoid taking undefined
	 * instruction exceptions later on.
	 */
	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
		/*
		 * Unconditionally clear the lock by writing a value
		 * other than 0xC5ACCE55 to the access register.
		 */
		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
		isb();
	}

	if (enable_monitor_mode())
		return;

	/* We must also reset any reserved registers. */
	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}
}

static int __cpuinit dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
{
	if (action == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

static int __init arch_hw_breakpoint_init(void)
{
	u32 dscr;

	debug_arch = get_debug_arch();

	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_reserved_brps = get_num_reserved_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps + core_num_reserved_brps, core_num_wrps);

	if (core_num_reserved_brps)
		pr_info("%d breakpoint(s) reserved for watchpoint "
				"single-step.\n", core_num_reserved_brps);

	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
		pr_warning("halting debug mode enabled. Assuming maximum "
				"watchpoint size of 4 bytes.\n");
	} else {
		/*
		 * Reset the breakpoint resources. We assume that a halting
		 * debugger will leave the world in a nice state for us.
		 */
		smp_call_function(reset_ctrl_regs, NULL, 1);
		reset_ctrl_regs(NULL);

		/* Work out the maximum supported watchpoint length. */
		max_watchpoint_len = get_max_wp_len();
		pr_info("maximum watchpoint size is %u bytes.\n",
				max_watchpoint_len);
	}

	/* Register debug fault handler. */
	hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			"watchpoint debug exception");
	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			"breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					unsigned long val, void *data)
{
	return NOTIFY_DONE;
}