xref: /openbmc/linux/arch/arm/kernel/hw_breakpoint.c (revision 63dc02bd)
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License version 2 as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
14  *
15  * Copyright (C) 2009, 2010 ARM Limited
16  *
17  * Author: Will Deacon <will.deacon@arm.com>
18  */
19 
20 /*
21  * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
22  * using the CPU's debug registers.
23  */
24 #define pr_fmt(fmt) "hw-breakpoint: " fmt
25 
26 #include <linux/errno.h>
27 #include <linux/hardirq.h>
28 #include <linux/perf_event.h>
29 #include <linux/hw_breakpoint.h>
30 #include <linux/smp.h>
31 
32 #include <asm/cacheflush.h>
33 #include <asm/cputype.h>
34 #include <asm/current.h>
35 #include <asm/hw_breakpoint.h>
36 #include <asm/kdebug.h>
37 #include <asm/traps.h>
38 
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/*
 * Number of BRP/WRP registers on this CPU. Initialised once in
 * arch_hw_breakpoint_init() and forced to zero if any CPU fails the
 * debug-register access probe.
 */
static int core_num_brps;
static int core_num_wrps;

/* Debug architecture version (DBGDIDR version field), cached at init. */
static u8 debug_arch;

/* Maximum supported watchpoint length in bytes (4 or 8), probed at init. */
static u8 max_watchpoint_len;
54 
/*
 * The breakpoint/watchpoint value and control registers live in the cp14
 * coprocessor space, addressed by CRm (0..15) and opcode2. The macros
 * below generate the switch cases used by read_wb_reg()/write_wb_reg()
 * to map a flat register index, encoded as ((OP2 << 4) + CRm), onto the
 * corresponding MRC/MCR access.
 */
#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL); \
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

/* Expand to one case per CRm value (0..15) for a given opcode2. */
#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)
100 
/*
 * Read the breakpoint/watchpoint register with flat index n, where
 * n = (op2 << 4) + CRm (see the ARM_BASE_* constants). Returns 0 and
 * warns if n does not encode a known register.
 */
static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint "
				"register %d\n", n);
	}

	return val;
}
117 
/*
 * Write val to the breakpoint/watchpoint register with flat index n
 * (same encoding as read_wb_reg()). Warns on an unknown index. The
 * trailing isb ensures the write is visible before any subsequent
 * instruction that could trigger a debug event.
 */
static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint "
				"register %d\n", n);
	}
	isb();
}
131 
/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/*
	 * Do we implement the extended CPUID interface? If not (pre-ARMv7
	 * style MIDR), assume a v6 debug architecture.
	 */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warning("CPUID feature registers not supported. "
			   "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	/* Read DBGDIDR and extract the debug architecture version field. */
	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}
147 
/* Return the cached debug architecture version (set at init time). */
u8 arch_get_debug_arch(void)
{
	return debug_arch;
}
152 
153 static int debug_arch_supported(void)
154 {
155 	u8 arch = get_debug_arch();
156 
157 	/* We don't support the memory-mapped interface. */
158 	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
159 		arch >= ARM_DEBUG_ARCH_V7_1;
160 }
161 
/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
	u32 didr;

	/* DBGDIDR encodes the number of implemented WRPs minus one. */
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 28) & 0xf) + 1;
}
169 
/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;

	/* DBGDIDR encodes the number of implemented BRPs minus one. */
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}
177 
/*
 * Does this core support mismatch breakpoints? Requires v7 debug or
 * later and at least two BRPs, since one BRP must be kept spare for the
 * mismatch ("single-step") breakpoint while another holds the real one.
 */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}
184 
/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * On debug architectures prior to 7.1, when a watchpoint fires, the
	 * only way to work out which watchpoint it was is by disassembling
	 * the faulting instruction and working out the address of the memory
	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
		return 1;

	/* v7.1+: the DFAR is reliable, so every implemented WRP is usable. */
	return get_num_wrp_resources();
}
211 
/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();

	/* One BRP is reserved for mismatch-based single-stepping. */
	if (core_has_mismatch_brps())
		brps -= 1;

	return brps;
}
218 
/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 *
 * Returns 0 on success, -EPERM if halting mode is active or the enable
 * write did not stick, -ENODEV for an unrecognised debug architecture.
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	int ret = 0;

	ARM_DBG_READ(c1, 0, dscr);

	/* Ensure that halting mode is disabled. */
	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
		"halting debug mode enabled. Unable to access hardware resources.\n")) {
		ret = -EPERM;
		goto out;
	}

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/*
	 * Write to the corresponding DSCR. The writable alias of the DSCR
	 * moved between debug architecture versions: v6 uses c1/op0, v7
	 * uses the DBGDSCRext register at c2/op2.
	 */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
	case ARM_DEBUG_ARCH_V7_1:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Check that the write made it through: the enable bit may be
	 * read-only if debug is disabled externally (e.g. DBGSWENABLE low).
	 */
	ARM_DBG_READ(c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN))
		ret = -EPERM;

out:
	return ret;
}
266 
267 int hw_breakpoint_slots(int type)
268 {
269 	if (!debug_arch_supported())
270 		return 0;
271 
272 	/*
273 	 * We can be called early, so don't rely on
274 	 * our static variables being initialised.
275 	 */
276 	switch (type) {
277 	case TYPE_INST:
278 		return get_num_brps();
279 	case TYPE_DATA:
280 		return get_num_wrps();
281 	default:
282 		pr_warning("unknown slot type: %d\n", type);
283 		return 0;
284 	}
285 }
286 
/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 *
 * Returns the maximum watchpoint length in bytes: 8 if the hardware
 * accepts a LEN_8 byte-address select in WCR0, otherwise 4.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	/* Pre-v7 debug only supports 4-byte watchpoints. */
	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	/*
	 * Probe by writing an 8-byte length to WCR0 and reading it back;
	 * if all the length bits stuck, the hardware supports LEN_8.
	 */
	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}
312 
/* Return the probed maximum watchpoint length in bytes (4 or 8). */
u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}
317 
/*
 * Install a perf counter breakpoint.
 *
 * Claims a free BRP/WRP slot on this CPU and programs the value and
 * control registers for bp. Returns 0 on success, -EPERM/-ENODEV if
 * monitor mode cannot be enabled, -EBUSY if no slot is free.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	/* Bit 0 of the control register is the enable bit. */
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Find the first free slot and claim it. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Override the breakpoint data with the step data: while
	 * single-stepping, the event is temporarily replaced by a mismatch
	 * breakpoint at the trigger address. For watchpoints, the mismatch
	 * breakpoint lives in the reserved BRP just past the usable ones.
	 */
	if (info->step_ctrl.enabled) {
		addr = info->trigger & ~0x3;
		ctrl = encode_ctrl_reg(info->step_ctrl);
		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
			i = 0;
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
		}
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;
}
384 
/*
 * Remove bp from this CPU's slot table and clear the control register
 * that was armed for it (including the reserved mismatch BRP if the
 * watchpoint was being single-stepped).
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return;

	/*
	 * Ensure that we disable the mismatch breakpoint: a stepped
	 * watchpoint is actually armed in the reserved BRP, so clear that
	 * control register instead of the WCR.
	 */
	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
	    info->step_ctrl.enabled) {
		i = 0;
		base = ARM_BASE_BCR + core_num_brps;
	}

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}
426 
427 static int get_hbp_len(u8 hbp_len)
428 {
429 	unsigned int len_in_bytes = 0;
430 
431 	switch (hbp_len) {
432 	case ARM_BREAKPOINT_LEN_1:
433 		len_in_bytes = 1;
434 		break;
435 	case ARM_BREAKPOINT_LEN_2:
436 		len_in_bytes = 2;
437 		break;
438 	case ARM_BREAKPOINT_LEN_4:
439 		len_in_bytes = 4;
440 		break;
441 	case ARM_BREAKPOINT_LEN_8:
442 		len_in_bytes = 8;
443 		break;
444 	}
445 
446 	return len_in_bytes;
447 }
448 
/*
 * Check whether bp virtual address is in kernel space.
 *
 * Both the start address and the last byte covered by the breakpoint
 * must lie at or above TASK_SIZE for it to count as a kernel address.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
463 
464 /*
465  * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
466  * Hopefully this will disappear when ptrace can bypass the conversion
467  * to generic breakpoint descriptions.
468  */
469 int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
470 			   int *gen_len, int *gen_type)
471 {
472 	/* Type */
473 	switch (ctrl.type) {
474 	case ARM_BREAKPOINT_EXECUTE:
475 		*gen_type = HW_BREAKPOINT_X;
476 		break;
477 	case ARM_BREAKPOINT_LOAD:
478 		*gen_type = HW_BREAKPOINT_R;
479 		break;
480 	case ARM_BREAKPOINT_STORE:
481 		*gen_type = HW_BREAKPOINT_W;
482 		break;
483 	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
484 		*gen_type = HW_BREAKPOINT_RW;
485 		break;
486 	default:
487 		return -EINVAL;
488 	}
489 
490 	/* Len */
491 	switch (ctrl.len) {
492 	case ARM_BREAKPOINT_LEN_1:
493 		*gen_len = HW_BREAKPOINT_LEN_1;
494 		break;
495 	case ARM_BREAKPOINT_LEN_2:
496 		*gen_len = HW_BREAKPOINT_LEN_2;
497 		break;
498 	case ARM_BREAKPOINT_LEN_4:
499 		*gen_len = HW_BREAKPOINT_LEN_4;
500 		break;
501 	case ARM_BREAKPOINT_LEN_8:
502 		*gen_len = HW_BREAKPOINT_LEN_8;
503 		break;
504 	default:
505 		return -EINVAL;
506 	}
507 
508 	return 0;
509 }
510 
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 *
 * Translates the generic perf attributes (type, length, address,
 * enabled) into the ARM control-register encoding held in
 * counter_arch_bp(bp). Returns 0 on success or -EINVAL for
 * unsupported type/length combinations.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		/*
		 * LEN_8 is only valid for watchpoints on hardware that
		 * supports it; otherwise fall through to the error path.
		 */
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
		/* fallthrough */
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}
583 
/*
 * Validate the arch-specific HW Breakpoint register settings.
 *
 * Builds the arch_hw_breakpoint from the perf attributes, checks the
 * address alignment against the requested length (folding any allowed
 * misalignment into the byte-address-select field), and rejects
 * configurations that would need an overflow handler we cannot supply.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	/*
	 * Note: cases 1 and 2 deliberately fall through to the next case
	 * (and ultimately default) when the length check fails.
	 */
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
		/* fallthrough */
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
		/* fallthrough */
	default:
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Align the address and shift the length encoding so the
	 * byte-address-select bits cover the originally requested bytes.
	 */
	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	/*
	 * Currently we rely on an overflow handler to take
	 * care of single-stepping the breakpoint when it fires.
	 * In the case of userspace breakpoints on a core with V7 debug,
	 * we can use the mismatch feature as a poor-man's hardware
	 * single-step, but this only works for per-task breakpoints.
	 */
	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
		pr_warning("overflow handler required but none found\n");
		ret = -EINVAL;
	}
out:
	return ret;
}
637 
/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 *
 * Re-installs bp as a mismatch breakpoint at addr: it will fire on the
 * first instruction executed at any OTHER address, which lets us step
 * past the triggering instruction without software emulation.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch  = 1;
	info->step_ctrl.len	  = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type	  = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled	  = 1;
	info->trigger		  = addr;
	arch_install_hw_breakpoint(bp);
}
654 
655 static void disable_single_step(struct perf_event *bp)
656 {
657 	arch_uninstall_hw_breakpoint(bp);
658 	counter_arch_bp(bp)->step_ctrl.enabled = 0;
659 	arch_install_hw_breakpoint(bp);
660 }
661 
/*
 * Data-abort-time watchpoint dispatch. Walks this CPU's installed
 * watchpoints, works out which one(s) match the faulting access (addr
 * from the DFAR, access type from fsr) and forwards them to perf. If
 * the event has no overflow handler, a temporary mismatch breakpoint is
 * armed so we can step over the faulting instruction.
 */
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	int i, access;
	u32 val, ctrl_reg, alignment_mask;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		/* RCU protects the perf_event against concurrent release. */
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/*
		 * The DFAR is an unknown value on debug architectures prior
		 * to 7.1. Since we only allow a single watchpoint on these
		 * older CPUs, we can set the trigger to the lowest possible
		 * faulting address.
		 */
		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
			BUG_ON(i > 0);
			info->trigger = wp->attr.bp_addr;
		} else {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;

			/* Check if the watchpoint value matches. */
			val = read_wb_reg(ARM_BASE_WVR + i);
			if (val != (addr & ~alignment_mask))
				goto unlock;

			/* Possible match, check the byte address select. */
			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
			decode_ctrl_reg(ctrl_reg, &ctrl);
			if (!((1 << (addr & alignment_mask)) & ctrl.len))
				goto unlock;

			/* Check that the access type matches. */
			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
				 HW_BREAKPOINT_R;
			if (!(access & hw_breakpoint_type(wp)))
				goto unlock;

			/* We have a winner. */
			info->trigger = addr;
		}

		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

unlock:
		rcu_read_unlock();
	}
}
733 
/*
 * Called on a breakpoint debug event to finish any in-progress
 * watchpoint single-step: if a stepped watchpoint's mismatch breakpoint
 * has moved past the trigger address (pc differs), re-arm the original
 * watchpoint.
 */
static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* Only watchpoints that are mid-step are of interest. */
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}
765 
/*
 * Prefetch-abort-time breakpoint dispatch. Matches the faulting PC
 * against this CPU's installed breakpoints, forwards hits to perf, and
 * restores any breakpoint that was being single-stepped via a mismatch
 * breakpoint. Finishes by letting pending watchpoint single-steps
 * complete.
 */
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			/* Step over the breakpoint if nobody else will. */
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}
818 
/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 *
 * Dispatches on the DSCR "method of entry" field. Returns 0 if the
 * fault was a debug event we handled, non-zero otherwise so the normal
 * fault path can take over.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	preempt_disable();

	/* Re-enable IRQs if the faulting context had them enabled. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
		/* fallthrough: treat it like a synchronous watchpoint. */
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, fsr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	preempt_enable();

	return ret;
}
855 
/*
 * One-time initialisation.
 */

/* CPUs on which a cp14 debug register access faulted (see debug_reg_trap). */
static cpumask_t debug_err_mask;
860 
/*
 * Undef-instruction hook body: a cp14 debug register access faulted
 * (e.g. DBGSWENABLE driven low). Record the failing CPU and skip the
 * 4-byte faulting instruction so boot can continue.
 */
static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
	int cpu = smp_processor_id();

	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
		   instr, cpu);

	/* Set the error flag for this CPU and skip the faulting instruction. */
	cpumask_set_cpu(cpu, &debug_err_mask);
	instruction_pointer(regs) += 4;
	return 0;
}
873 
/* Undef hook matching cp14 debug register MRC/MCR encodings. */
static struct undef_hook debug_reg_hook = {
	.instr_mask	= 0x0fe80f10,
	.instr_val	= 0x0e000e10,
	.fn		= debug_reg_trap,
};
879 
/*
 * Per-CPU reset of the debug register state: verify the debug logic is
 * powered, clear the OS lock and vector-catch events, enable monitor
 * mode and zero every breakpoint/watchpoint register. Runs on each CPU
 * via on_each_cpu()/smp_call_function_single(); the argument is unused.
 */
static void reset_ctrl_regs(void *unused)
{
	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
	u32 dbg_power;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving the debug
	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
	 * the debug registers out of reset, so we must unlock the OS Lock
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
	switch (debug_arch) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		/* ARMv6 cores just need to reset the registers. */
		goto reset_regs;
	case ARM_DEBUG_ARCH_V7_ECP14:
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
		if ((dbg_power & 0x1) == 0)
			err = -EPERM;
		break;
	case ARM_DEBUG_ARCH_V7_1:
		/*
		 * Ensure the OS double lock is clear.
		 */
		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
		if ((dbg_power & 0x1) == 1)
			err = -EPERM;
		break;
	}

	if (err) {
		pr_warning("CPU %d debug is powered down!\n", cpu);
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}

	/*
	 * Unconditionally clear the lock by writing a value
	 * other than 0xC5ACCE55 to the access register.
	 */
	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
	isb();

	/*
	 * Clear any configured vector-catch events before
	 * enabling monitor mode.
	 */
	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
	isb();

reset_regs:
	if (enable_monitor_mode())
		return;

	/*
	 * We must also reset any reserved registers (the mismatch BRP),
	 * hence the raw BRP count rather than core_num_brps.
	 */
	raw_num_brps = get_num_brp_resources();
	for (i = 0; i < raw_num_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}
}
953 
954 static int __cpuinit dbg_reset_notify(struct notifier_block *self,
955 				      unsigned long action, void *cpu)
956 {
957 	if (action == CPU_ONLINE)
958 		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
959 
960 	return NOTIFY_OK;
961 }
962 
/* Hotplug notifier block, registered in arch_hw_breakpoint_init(). */
static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};
966 
/*
 * One-time boot initialisation: detect the debug architecture, count
 * the usable BRPs/WRPs, reset the debug registers on every CPU (backing
 * off entirely if any CPU faults on register access), probe the maximum
 * watchpoint length and hook the debug fault and hotplug paths.
 * Always returns 0 so boot continues even without hw_breakpoint support.
 */
static int __init arch_hw_breakpoint_init(void)
{
	u32 dscr;

	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	/*
	 * We need to tread carefully here because DBGSWENABLE may be
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	register_undef_hook(&debug_reg_hook);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	on_each_cpu(reset_ctrl_regs, NULL, 1);
	unregister_undef_hook(&debug_reg_hook);
	/* If any CPU faulted on a debug register, disable the facility. */
	if (!cpumask_empty(&debug_err_mask)) {
		core_num_brps = 0;
		core_num_wrps = 0;
		return 0;
	}

	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
		"", core_num_wrps);

	/*
	 * With halting mode active we cannot safely probe WCR0, so assume
	 * the minimum (4-byte) watchpoint length.
	 */
	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
		max_watchpoint_len = 4;
		pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
			   max_watchpoint_len);
	} else {
		/* Work out the maximum supported watchpoint length. */
		max_watchpoint_len = get_max_wp_len();
		pr_info("maximum watchpoint size is %u bytes.\n",
				max_watchpoint_len);
	}

	/* Register debug fault handler. */
	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "watchpoint debug exception");
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
1028 
/* Required by the perf hw_breakpoint API; nothing to read on ARM. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
1032 
/*
 * Dummy function to register with die_notifier.
 * Always defers to the next notifier in the chain.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
1041