xref: /openbmc/linux/arch/arm64/kernel/probes/kprobes.c (revision f59a3ee6)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/irq.h>
#include <asm/patching.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->ainsn.api.insn;

	/*
	 * Prepare the insn slot. As Mark Rutland points out, it depends on a
	 * couple of subtleties:
	 *
	 * - That the I-cache maintenance for these instructions is complete
	 *   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
	 *   ensures this, but just omits causing a Context-Synchronization-Event
	 *   on all CPUs).
	 *
	 * - That the kprobe BRK results in an exception (and consequently a
	 *   Context-Synchronization-Event), which ensures that the CPU will
	 *   fetch the single-step slot instructions *after* this, ensuring that
	 *   the new instructions are used.
	 *
	 * Nominally, an ISB would be placed after patching to guarantee that the
	 * I-cache maintenance is observed on all CPUs; however, the single-step
	 * slot is installed from the BRK exception handler, so it is unnecessary
	 * to generate a Context-Synchronization-Event via an ISB again.
	 */
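	/*
	 * Illustrative slot layout after the two patches below (just a sketch,
	 * not new behaviour): the copied instruction is followed by a
	 * single-step BRK, whose address is what
	 * kprobe_breakpoint_ss_handler() compares against.
	 *
	 *   insn[0]: p->opcode                <- PC is pointed here by
	 *                                        setup_singlestep()
	 *   insn[1]: BRK64_OPCODE_KPROBES_SS  <- traps back into the kernel
	 *                                        once the insn has executed
	 */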
	aarch64_insn_patch_text_nosync(addr, p->opcode);
	aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);

	/*
	 * The return address needs to be restored after stepping out-of-line.
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
	  sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out-of-line. No need to adjust the PC. */
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single-step simulated, now go for post-processing */
	post_kprobe_handler(p, kcb, regs);
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

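/*
 * XOL slot pages are allocated read-only + executable; the slots themselves
 * are written via the insn patching helpers (see arch_prepare_ss_slot())
 * rather than by direct stores through this mapping.
 */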
void *alloc_insn_page(void)
{
	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
			GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
			NUMA_NO_NODE, __builtin_return_address(0));
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = BRK64_OPCODE_KPROBES;

	aarch64_insn_patch_text(&addr, &insn, 1);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;

	aarch64_insn_patch_text(&addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Mask all of DAIF while executing the instruction out-of-line, to keep things
 * simple and avoid nesting exceptions. Interrupts do have to be disabled since
 * the kprobe state is per-CPU and doesn't get migrated.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= DAIF_MASK;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;
}

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		kprobes_save_local_irqflag(kcb, regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)
		cur->post_handler(cur, regs, 0);

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it has
			 * modified the execution path and there is no need
			 * for single-stepping. Just reset the current kprobe
			 * and exit.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
			} else
				reset_current_kprobe();
		}
	}
	/*
	 * If no kprobe was found, the breakpoint instruction was removed
	 * right after we hit it. Another CPU has removed either a probepoint
	 * or a debugger breakpoint at this address. In either case, no
	 * further handling of this exception is appropriate. Return to the
	 * original instruction and continue.
	 */
}
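
/*
 * End-to-end flow for a slot-based probe (a summary of the handlers above and
 * below; illustrative only, no new behaviour):
 *
 *   1. The probed address hits BRK #KPROBES_BRK_IMM ->
 *      kprobe_breakpoint_handler() -> kprobe_handler() runs the pre-handler.
 *   2. setup_singlestep() masks DAIF and points the PC at the XOL slot.
 *   3. The copied insn executes, then BRK #KPROBES_BRK_SS_IMM traps into
 *      kprobe_breakpoint_ss_handler(), which restores the interrupt flags and
 *      calls post_kprobe_handler().
 *   4. post_kprobe_handler() restores the PC (for non-branching insns), runs
 *      the post-handler and resets the current kprobe.
 */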

static int __kprobes
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);

		return DBG_HOOK_HANDLED;
	}

	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

static struct break_hook kprobes_break_ss_hook = {
	.imm = KPROBES_BRK_SS_IMM,
	.fn = kprobe_breakpoint_ss_handler,
};

static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
	.imm = KPROBES_BRK_IMM,
	.fn = kprobe_breakpoint_handler,
};

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
					(unsigned long)__idmap_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}

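/*
 * kretprobe return trampoline handling: arch_prepare_kretprobe() below saves
 * the real return address and the frame pointer, then rewrites x30 so the
 * probed function returns into __kretprobe_trampoline. The trampoline calls
 * trampoline_probe_handler(), which uses the saved frame pointer to look up
 * the matching kretprobe_instance and hand back the original return address.
 */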
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
	ri->fp = (void *)regs->regs[29];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	register_kernel_break_hook(&kprobes_break_hook);
	register_kernel_break_hook(&kprobes_break_ss_hook);

	return 0;
}
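
/*
 * A minimal sketch of how a client would exercise this support, using the
 * generic kprobes API (illustrative only; "do_sys_openat2" is just an example
 * symbol and is not referenced by this file):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS, x0=0x%llx\n",
 *			(void *)instruction_pointer(regs), regs->regs[0]);
 *		return 0;	// 0: continue with single-step / simulation
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_sys_openat2",
 *		.pre_handler = my_pre,
 *	};
 *
 *	// register_kprobe() reaches arch_prepare_kprobe() and
 *	// arch_arm_kprobe() above; unregister_kprobe() undoes both.
 *	register_kprobe(&my_kp);
 *	...
 *	unregister_kprobe(&my_kp);
 */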