1 // SPDX-License-Identifier: GPL-2.0+
2
3 #define pr_fmt(fmt) "kprobes: " fmt
4
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/bug.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
15
16 #include "decode-insn.h"
17
/* Kprobe currently being handled on this CPU; NULL when none is active. */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* Per-CPU control block: status plus saved kprobe/irq state for nesting. */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
20
21 static void __kprobes
22 post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
23
/*
 * Lay out the out-of-line single-step slot for @p: the original
 * instruction followed by a 32-bit EBREAK, so execution traps right
 * after the copied instruction has been stepped.
 */
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	size_t len = GET_INSN_LENGTH(p->opcode);
	u32 insn = __BUG_INSN_32;

	/* After the step, resume at the instruction following the probe. */
	p->ainsn.api.restore = (unsigned long)p->addr + len;

	/* Copy the probed insn into the slot, then append the EBREAK. */
	patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
	patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
}
34
/* The probed instruction will be emulated in software, no slot needed. */
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* restore == 0 tells post_kprobe_handler() not to rewrite epc. */
	p->ainsn.api.restore = 0;
}
39
/*
 * Emulate the probed instruction (no out-of-line step), then run the
 * normal post-probe bookkeeping.
 */
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* handler is set by riscv_probe_decode_insn() for simulable insns */
	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode,
				     (unsigned long)p->addr, regs);

	post_kprobe_handler(p, kcb, regs);
}
50
/*
 * Verify that p->addr lies on an instruction boundary.  With the C
 * extension, instructions are 2 or 4 bytes, so walk forward from the
 * start of the symbol (addr - offset), advancing by each instruction's
 * decoded length, and accept only if the walk lands exactly on addr.
 */
static bool __kprobes arch_check_kprobe(struct kprobe *p)
{
	unsigned long tmp  = (unsigned long)p->addr - p->offset;
	unsigned long addr = (unsigned long)p->addr;

	while (tmp <= addr) {
		if (tmp == addr)
			return true;

		/* length is encoded in the low bits of the first halfword */
		tmp += GET_INSN_LENGTH(*(u16 *)tmp);
	}

	return false;
}
65
/*
 * Validate the probe address, save a copy of the original instruction,
 * and decide whether it will be single-stepped out of line (slot) or
 * simulated.  Returns 0 on success, -EILSEQ for a misaligned or
 * mid-instruction address, -EINVAL for an unsupported instruction,
 * -ENOMEM if no slot is available.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	u16 *insn = (u16 *)p->addr;

	/* RISC-V instructions are at least halfword aligned. */
	if ((unsigned long)insn & 0x1)
		return -EILSEQ;

	/* Reject addresses that fall inside a preceding instruction. */
	if (!arch_check_kprobe(p))
		return -EILSEQ;

	/* copy instruction: low halfword, plus high halfword if 32-bit */
	p->opcode = (kprobe_opcode_t)(*insn++);
	if (GET_INSN_LENGTH(p->opcode) == 4)
		p->opcode |= (kprobe_opcode_t)(*insn) << 16;

	/* decode instruction */
	switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction: slot layout or simulation bookkeeping */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
105
#ifdef CONFIG_MMU
/*
 * Allocate the page that backs out-of-line single-step slots.  Mapped
 * read+exec (not writable): slot contents are written through
 * patch_text_nosync() in arch_prepare_ss_slot() instead of directly.
 * VM_FLUSH_RESET_PERMS ensures permissions are reset when freed.
 */
void *alloc_insn_page(void)
{
	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
				    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif
115
116 /* install breakpoint in text */
arch_arm_kprobe(struct kprobe * p)117 void __kprobes arch_arm_kprobe(struct kprobe *p)
118 {
119 size_t len = GET_INSN_LENGTH(p->opcode);
120 u32 insn = len == 4 ? __BUG_INSN_32 : __BUG_INSN_16;
121
122 patch_text(p->addr, &insn, len);
123 }
124
125 /* remove breakpoint from text */
arch_disarm_kprobe(struct kprobe * p)126 void __kprobes arch_disarm_kprobe(struct kprobe *p)
127 {
128 size_t len = GET_INSN_LENGTH(p->opcode);
129
130 patch_text(p->addr, &p->opcode, len);
131 }
132
arch_remove_kprobe(struct kprobe * p)133 void __kprobes arch_remove_kprobe(struct kprobe *p)
134 {
135 }
136
/* Stash the active kprobe and its status before handling a nested hit. */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
142
/* Undo save_previous_kprobe() once the nested probe has completed. */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
148
/* Mark @p as the kprobe being handled on this CPU. */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
153
154 /*
155 * Interrupts need to be disabled before single-step mode is set, and not
156 * reenabled until after single-step mode ends.
157 * Without disabling interrupt on local CPU, there is a chance of
158 * interrupt occurrence in the period of exception return and start of
159 * out-of-line single-step, that result in wrongly single stepping
160 * into the interrupt handler.
161 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	/* Save the trapped context's status, then clear SPIE so that the
	 * return from the exception does not re-enable interrupts. */
	kcb->saved_status = regs->status;
	regs->status &= ~SR_SPIE;
}
168
/* Restore the status register saved by kprobes_save_local_irqflag(). */
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->status = kcb->saved_status;
}
174
/*
 * Arrange to execute the probed instruction: either redirect epc to the
 * out-of-line slot (hardware single-step via the trailing EBREAK) or
 * simulate it in software.  @reenter is non-zero when this probe hit
 * while another kprobe was already being handled.
 */
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		/* Preserve the outer probe's state so it can be resumed. */
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);

		/* Resume in the slot; its EBREAK traps back to us. */
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
202
/*
 * A breakpoint fired while another kprobe was already active (e.g. a
 * probe inside a pre/post handler).  Returns 1 if the nested hit was
 * handled, 0 on an unexpected status.
 */
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		/* Legitimate nesting: count the miss and step the new probe. */
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/* A breakpoint during single-step is unrecoverable. */
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
226
/*
 * Common completion path after the probed instruction has executed
 * (out-of-line step finished, or simulation done): fix up epc, unwind
 * nesting state, and invoke the user's post handler.
 */
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		regs->epc = cur->ainsn.api.restore;

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler may itself hit a breakpoint; status is
		 * already HIT_SSDONE, so such a hit takes the reenter path.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
251
/*
 * Called from the fault path when a fault happens with a kprobe active.
 * Returns 1 if the fault was consumed here, 0 to let normal fault
 * handling proceed.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->epc = (unsigned long) cur->addr;
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else {
			/* Re-enable IRQs disabled for the step, then reset. */
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
289
/*
 * Entry from the ebreak trap handler.  Returns true if the trap came
 * from a kprobe breakpoint and was handled here; false tells the caller
 * to continue normal breakpoint processing.
 */
bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	/* Look up a registered kprobe at the trapping address. */
	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			/* Nested hit while another probe is in flight. */
			if (reenter_kprobe(p, regs, kcb))
				return true;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and no need to single
			 * stepping. Let's just reset current kprobe and exit.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return true;
	}

	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
	return false;
}
339
/*
 * Entry from the trap raised by the EBREAK placed after the out-of-line
 * slot.  Returns true if this trap belongs to an in-flight single-step.
 */
bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	/*
	 * Ours only if a probe is in a single-step state and the trap hit
	 * exactly at the end of the copied instruction in the slot
	 * (slot start + probed insn length).  KPROBE_HIT_SS/KPROBE_REENTER
	 * are bitmask status values, so the & test is a set-membership check.
	 */
	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);
		return true;
	}
	/* not ours, kprobes should ignore it */
	return false;
}
356
357 /*
358 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
359 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
360 */
arch_populate_kprobe_blacklist(void)361 int __init arch_populate_kprobe_blacklist(void)
362 {
363 int ret;
364
365 ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
366 (unsigned long)__irqentry_text_end);
367 return ret;
368 }
369
/* No arch-specific trampoline handling needed on this architecture. */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
374
/* No arch-specific initialization required; always succeeds. */
int __init arch_init_kprobes(void)
{
	return 0;
}
379