xref: /openbmc/linux/arch/arm64/kernel/probes/uprobes.c (revision 8730046c)
/*
 * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

#define UPROBE_INV_FAULT_CODE	UINT_MAX

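/*
 * Copy the probed instruction into the task's XOL (execute-out-of-line)
 * slot and flush the D-side/I-side cache aliases so the CPU fetches the
 * freshly written instruction.
 */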
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases(dst, len);

	kunmap_atomic(xol_page_kaddr);
}

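/*
 * Unlike architectures whose breakpoint trap advances the PC, the arm64
 * BRK exception leaves the PC on the BRK instruction itself, so the
 * breakpoint address is simply the current instruction pointer.
 */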
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

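/*
 * Validate and decode the instruction to be probed: AArch32 tasks are
 * rejected, the address must be naturally aligned, and instructions that
 * cannot safely execute out of line are marked for simulation instead.
 */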
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
		unsigned long addr)
{
	probe_opcode_t insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (test_bit(TIF_32BIT, &mm->context.flags))
		return -ENOTSUPP;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}

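/*
 * Prepare the out-of-line single-step: prime thread.fault_code so a trap
 * during the step can be detected later, point the PC at the XOL slot
 * and enable hardware single-stepping.
 */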
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if the XOL insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Point the instruction pointer at the out-of-line slot */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}

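/*
 * The out-of-line step completed cleanly: move the PC past the original
 * breakpoint and turn single-stepping back off.
 */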
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);

	/* Point the instruction pointer at the insn following the breakpoint */
	instruction_pointer_set(regs, utask->vaddr + 4);

	user_disable_single_step(current);

	return 0;
}

bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	/*
	 * If the XOL instruction itself trapped between
	 * arch_uprobe_pre_xol() and arch_uprobe_post_xol(), the fault
	 * handler overwrote the invalid fault code set by
	 * arch_uprobe_pre_xol(), so any other value means the step trapped.
	 */
	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
		return true;

	return false;
}

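/*
 * Instructions the decoder refused a slot for (e.g. PC-relative ones)
 * are emulated rather than stepped: run the simulation handler chosen at
 * analysis time and skip the single-step machinery entirely.
 */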
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
		struct pt_regs *regs)
{
	/*
	 * A retprobed assembly label may be reached with a plain branch
	 * (B) rather than a branch-and-link, leaving regs->sp equal to
	 * ret->stack. Treat that case as alive so that cleanup and
	 * reporting of the callee label's return instances still happen
	 * when handle_trampoline() runs for the called function.
	 */
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

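/*
 * Swap the return address held in the link register (x30) for the
 * uretprobe trampoline, handing the original back to the core so it can
 * be restored when the probed function returns.
 */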
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = procedure_link_pointer(regs);
	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

	return orig_ret_vaddr;
}

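/*
 * arm64 dispatches uprobe traps through the break/step hooks registered
 * below, so the generic exception notifier has nothing to do.
 */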
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

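/*
 * BRK64 hook: hand user-mode uprobe breakpoints to the core uprobes
 * layer; anything hit in kernel mode is not ours to handle.
 */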
static int uprobe_breakpoint_handler(struct pt_regs *regs,
		unsigned int esr)
{
	if (user_mode(regs) && uprobe_pre_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

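/*
 * Single-step hook: after the XOL slot executes, the PC should sit one
 * instruction (4 bytes) past the slot; warn if it does not, then let the
 * core uprobes layer complete the step.
 */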
static int uprobe_single_step_handler(struct pt_regs *regs,
		unsigned int esr)
{
	struct uprobe_task *utask = current->utask;

	if (user_mode(regs)) {
		WARN_ON(utask &&
			(instruction_pointer(regs) != utask->xol_vaddr + 4));

		if (uprobe_post_sstep_notifier(regs))
			return DBG_HOOK_HANDLED;
	}

	return DBG_HOOK_ERROR;
}

/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
	.esr_mask = BRK64_ESR_MASK,
	.esr_val = BRK64_ESR_UPROBES,
	.fn = uprobe_breakpoint_handler,
};

/* uprobe single step handler hook */
static struct step_hook uprobes_step_hook = {
	.fn = uprobe_single_step_handler,
};

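/*
 * Register both debug hooks at boot so uprobes can arm BRK64-based
 * breakpoints in user text at any point afterwards.
 */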
static int __init arch_init_uprobes(void)
{
	register_break_hook(&uprobes_break_hook);
	register_step_hook(&uprobes_step_hook);

	return 0;
}

device_initcall(arch_init_uprobes);
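
/*
 * Usage sketch (not part of this file): with these hooks registered, a
 * uprobe can be armed from userspace through the tracefs uprobe tracer.
 * The event name "myprobe" and the 0x1234 symbol offset below are
 * hypothetical; substitute an offset obtained from e.g. readelf or nm.
 *
 *   echo 'p:myprobe /bin/bash:0x1234' > /sys/kernel/debug/tracing/uprobe_events
 *   echo 1 > /sys/kernel/debug/tracing/events/uprobes/myprobe/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 */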