// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function-return probes functionality.
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/set_memory.h>
#include <linux/cfi.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/ibt.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
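/*
 * Each W() row encodes 16 opcodes as single bits; the "<< (row % 32)"
 * shift places even rows in bits 0-15 and odd rows in bits 16-31, so
 * each pair of rows in the table below is OR'd into one 32-bit word.
 */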
/*
 * Undefined/reserved opcodes, conditional jumps, opcode extension
 * groups, and some special opcodes cannot be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long*) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------          */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", },	/* This function switches only the current task,
				   but doesn't switch the kernel stack. */
	{NULL, NULL}		/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

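/*
 * A rel32 jmp/call is 5 bytes: one opcode byte plus a 4-byte displacement
 * relative to the end of the instruction, hence the "+ 5" below.
 */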
static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Write at @dest a jump instruction which, executed at @from, jumps to @to. */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Write at @dest a call instruction which, executed at @from, calls @to. */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);

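/*
 * "Boosting" executes the copied instruction directly from the insn slot
 * and then jumps straight back to the original code, avoiding the second
 * int3 trap that plain single-stepping would need.
 */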
/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;
	insn_byte_t prefix;
	int i;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	for_each_insn_prefix(insn, i, prefix) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(prefix);
		/* Can't boost the address-size override or CS override prefix */
		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
			return 0;
	}

	opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0x62:		/* bound */
	case 0x70 ... 0x7f:	/* Conditional jumps */
	case 0x9a:		/* Call far */
	case 0xc0 ... 0xc1:	/* Grp2 */
	case 0xcc ... 0xce:	/* software exceptions */
	case 0xd0 ... 0xd3:	/* Grp2 */
	case 0xd6:		/* (UD) */
	case 0xd8 ... 0xdf:	/* ESC */
	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
	case 0xe8 ... 0xe9:	/* near Call, JMP */
	case 0xeb:		/* Short JMP */
	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
	case 0xf6 ... 0xf7:	/* Grp3 */
	case 0xfe:		/* Grp4 */
		/* ... are not boostable */
		return 0;
	case 0xff:		/* Grp5 */
		/* Only indirect jmp (FF /4) is boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
	default:
		return 1;
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	bool faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr) == addr;
	/*
	 * Use the current code if it is not modified by a kprobe
	 * and cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn holds the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal kprobe, kp->opcode has a
	 * copy of the first byte of the probed instruction, which was
	 * overwritten by int3. Since the instruction at kp->addr is not
	 * modified by kprobes except for that first byte, we can recover the
	 * original instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent state.
	 * Fortunately, we know that the original code there is the ideal
	 * 5-byte NOP.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, x86_nops[5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption, to prevent
 * the kprobes it references from being released.
 * Returns zero if the instruction cannot be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/*
 * Check if paddr is at an instruction boundary, by decoding forward from
 * the start of the containing symbol until we reach paddr.
 */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		int ret;

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization changes the breakpoint into a
		 * relative jump. Since a relative jump is itself a normal
		 * instruction, we just decode what is there if no kprobe
		 * is registered.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)__addr);
		if (ret < 0)
			return 0;

#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		addr += insn.length;
	}
	if (IS_ENABLED(CONFIG_CFI_CLANG)) {
		/*
		 * The compiler generates the following instruction sequence
		 * for indirect call checks, and cfi.c decodes this:
		 *
		 *   movl    -<id>, %r10d       ; 6 bytes
		 *   addl    -4(%reg), %r10d    ; 4 bytes
		 *   je      .Ltmp1             ; 2 bytes
		 *   ud2                        ; <- regs->ip
		 * .Ltmp1:
		 *
		 * Also, these movl and addl encode the expected type, so
		 * they must not be touched.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;

		if (insn_decode_kernel(&insn, (void *)__addr) < 0)
			return 0;

		if (insn.opcode.value == 0xBA)
			offset = 12;
		else if (insn.opcode.value == 0x3)
			offset = 6;
		else
			goto out;

		/* This movl/addl is used for decoding CFI. */
		if (is_cfi_trap(addr + offset))
			return 0;
	}

out:
	return (addr == paddr);
}

/* If x86 supports IBT (ENDBR), it must be skipped. */
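/*
 * With IBT, functions begin with a 4-byte ENDBR instruction that must stay
 * intact so indirect calls still land on a valid ENDBR. The probe address
 * is therefore moved past it, and both offset 0 and offset 4 are treated
 * as the function entry.
 */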
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{
	u32 insn;

	/*
	 * Since 'addr' is not guaranteed to be safe to access, use
	 * copy_from_kernel_nofault() to read the instruction:
	 */
	if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
		return NULL;

	if (is_endbr(insn)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;

	} else {
		*on_func_entry = !offset;
	}

	return (kprobe_opcode_t *)(addr + offset);
}

/*
 * Copy an instruction, recovering it if it was modified by kprobes, and
 * adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted based on @real,
 * not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
	int ret;

	if (!recovered_insn || !insn)
		return 0;

	/* This can read live kernel text if the given address was not recovered */
	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
			MAX_INSN_SIZE))
		return 0;

	ret = insn_decode_kernel(insn, dest);
	if (ret < 0)
		return 0;

	/* We cannot probe an instruction with a forced-emulation prefix */
	if (insn_has_emulate_prefix(insn))
		return 0;

	/* Another subsystem has put a breakpoint there; we failed to recover it */
	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
		return 0;

	/* We should not single-step exception-masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
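		/*
		 * For example (illustrative numbers): "lea 0x10(%rip), %rax"
		 * copied from @src to @real keeps targeting src + 7 + 0x10,
		 * because newdisp = src + 0x10 - real and the instruction
		 * length cancels out of the difference.
		 */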
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}

/* Prepare a reljump or an int3 right after the copied instruction */
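/*
 * The resulting insn slot layout is either
 *   [copied insn][reljump back to p->addr + insn->length]  (boosted)
 * or
 *   [copied insn][int3]                                    (single-stepped)
 */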
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{
	int len = insn->length;

	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * This instruction can be executed directly if it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
		/* Otherwise, put an int3 for trapping single-step */
		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
			return -ENOSPC;

		buf[len] = INT3_INSN_OPCODE;
		len += INT3_INSN_SIZE;
	}

	return len;
}

/*
 * Allocate a page for insn slots and make it ROX (read-only, executable);
 * subsequent writes to it must go through text_poke().
 */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	/*
	 * TODO: Once additional kernel code protection mechanisms are set, ensure
	 * that the page was not maliciously altered and that it is still zeroed.
	 */
	set_memory_rox((unsigned long)page, 1);

	return page;
}

/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
	switch (p->ainsn.opcode) {
	case 0xfa:	/* cli */
		regs->flags &= ~(X86_EFLAGS_IF);
		break;
	case 0xfb:	/* sti */
		regs->flags |= X86_EFLAGS_IF;
		break;
	case 0x9c:	/* pushf */
		int3_emulate_push(regs, regs->flags);
		break;
	case 0x9d:	/* popf */
		regs->flags = int3_emulate_pop(regs);
		break;
	}
	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
	int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	func += p->ainsn.rel32;
	int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);

static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
	bool match;

	if (p->ainsn.loop.type != 3) {	/* LOOP* */
		if (p->ainsn.loop.asize == 32)
			match = ((*(u32 *)&regs->cx)--) != 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = ((*(u64 *)&regs->cx)--) != 0;
#endif
		else
			match = ((*(u16 *)&regs->cx)--) != 0;
	} else {			/* JCXZ */
		if (p->ainsn.loop.asize == 32)
			match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = *(u64 *)(&regs->cx) == 0;
#endif
		else
			match = *(u16 *)(&regs->cx) == 0;
	}

	if (p->ainsn.loop.type == 0)		/* LOOPNE */
		match = match && !(regs->flags & X86_EFLAGS_ZF);
	else if (p->ainsn.loop.type == 1)	/* LOOPE */
		match = match && (regs->flags & X86_EFLAGS_ZF);

	if (match)
		ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);

static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};
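/*
 * The table above is indexed by the x86 register encoding (rax = 0 ...
 * r15 = 15), i.e. ModRM.rm optionally extended by REX.B, as computed in
 * prepare_emulation() below.
 */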

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
	insn_byte_t opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0xfa:	/* cli */
	case 0xfb:	/* sti */
	case 0x9c:	/* pushfl */
	case 0x9d:	/* popf/popfd */
		/*
		 * IF modifiers must be emulated, since they could enable
		 * interrupts in the middle of int3 single-stepping.
		 */
		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
		p->ainsn.opcode = opcode;
		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
	case 0x9a:	/* far call absolute -- segment is not supported */
	case 0xea:	/* far jmp absolute -- segment is not supported */
	case 0xcc:	/* int3 */
	case 0xcf:	/* iret -- in-kernel IRET is not supported */
		return -EOPNOTSUPP;
	case 0xe8:	/* near call relative */
		p->ainsn.emulate_op = kprobe_emulate_call;
		if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0xeb:	/* short jump relative */
	case 0xe9:	/* near jump relative */
		p->ainsn.emulate_op = kprobe_emulate_jmp;
		if (insn->immediate.nbytes == 1)
			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		else if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0x70 ... 0x7f:
		/* 1-byte conditional jump */
		p->ainsn.emulate_op = kprobe_emulate_jcc;
		p->ainsn.jcc.type = opcode & 0xf;
		p->ainsn.rel32 = insn->immediate.value;
		break;
	case 0x0f:
		opcode = insn->opcode.bytes[1];
		if ((opcode & 0xf0) == 0x80) {
			/* 2-byte conditional jump */
			p->ainsn.emulate_op = kprobe_emulate_jcc;
			p->ainsn.jcc.type = opcode & 0xf;
			if (insn->immediate.nbytes == 2)
				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
			else
				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		} else if (opcode == 0x01 &&
			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
			/* VM extensions - not supported */
			return -EOPNOTSUPP;
		}
		break;
	case 0xe0:	/* Loop NZ */
	case 0xe1:	/* Loop */
	case 0xe2:	/* Loop */
	case 0xe3:	/* J*CXZ */
		p->ainsn.emulate_op = kprobe_emulate_loop;
		p->ainsn.loop.type = opcode & 0x3;
		p->ainsn.loop.asize = insn->addr_bytes * 8;
		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		break;
	case 0xff:
		/*
		 * Since 0xff is an extended group opcode, the instruction
		 * is determined by the MOD/RM byte.
		 */
		opcode = insn->modrm.bytes[0];
		switch (X86_MODRM_REG(opcode)) {
		case 0b010:	/* FF /2, call near, absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
			break;
		case 0b100:	/* FF /4, jmp near, absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
			break;
		case 0b011:	/* FF /3, call far, absolute indirect */
		case 0b101:	/* FF /5, jmp far, absolute indirect */
			return -EOPNOTSUPP;
		}

		if (!p->ainsn.emulate_op)
			break;

		if (insn->addr_bytes != sizeof(unsigned long))
			return -EOPNOTSUPP;	/* Don't support different size */
		if (X86_MODRM_MOD(opcode) != 3)
			return -EOPNOTSUPP;	/* TODO: support memory addressing */

		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
		if (X86_REX_B(insn->rex_prefix.value))
			p->ainsn.indirect.reg += 8;
#endif
		break;
	default:
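		/* Not emulated: the instruction is int3 single-stepped instead. */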
		break;
	}
	p->ainsn.size = insn->length;

	return 0;
}

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int ret, len;

	/* Copy the instruction, recovering it if another kprobe/optprobe modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/* Analyze the opcode and set up the emulate functions */
	ret = prepare_emulation(p, &insn);
	if (ret < 0)
		return ret;

	/* Add int3 for single-stepping or a booster jmp */
	len = prepare_singlestep(buf, p, &insn);
	if (len < 0)
		return len;

	/* The displacement adjustment above never changes the first byte */
	p->opcode = buf[0];

	p->ainsn.tp_len = len;
	perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	memset(&p->ainsn, 0, sizeof(p->ainsn));

	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();
	perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
	text_poke(p->addr, &p->opcode, 1);
	text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* Record the perf event before freeing the slot */
		perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
				     p->ainsn.tp_len, NULL, 0);
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & X86_EFLAGS_IF);
}

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		/* This will restore both kcb and current_kprobe */
		restore_previous_kprobe(kcb);
	} else {
		/*
		 * Always update the kcb status because
		 * reset_current_kprobe() doesn't update kcb.
		 */
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (cur->post_handler)
			cur->post_handler(cur, regs, 0);
		reset_current_kprobe();
	}
}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPTION)
	if (p->ainsn.boostable) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe neither resets nor sets
		 * current_kprobe, because it doesn't use single-stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	if (p->ainsn.emulate_op) {
		p->ainsn.emulate_op(p, regs);
		kprobe_post_process(p, regs, kcb);
		return;
	}

	/* Disable interrupts and set the ip register to the trampoline */
	regs->flags &= ~X86_EFLAGS_IF;
	regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn. We also don't use the trap flag; instead,
 * another "int3" is placed right after the copied instruction.
 * Unlike trap-based single-stepping, "int3" single-stepping cannot
 * handle instructions that change the ip register, e.g. jmp, call,
 * and conditional jmp, nor instructions that change the IF flag,
 * because interrupts must be disabled around the single-stepping.
 * Such instructions are emulated in software; the others are
 * single-stepped using "int3".
 *
 * When the second "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that we resume execution at the correct code.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;

	/* Restore the saved interrupt flag and ip register */
	regs->flags |= kcb->kprobe_saved_flags;
	/*
	 * regs->ip points just past the executed int3 that follows the
	 * copied instruction, so translating it back to the original
	 * code must also step back by the int3's size.
	 */
	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/*
		 * A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG, or we'll continue in an endless reentering loop
		 * and eventually end up with a stack overflow.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* Impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
	return (kcb->kprobe_status == KPROBE_HIT_SS ||
		kcb->kprobe_status == KPROBE_REENTER);
}

/*
 * Interrupts are disabled on entry, as int3 is an interrupt gate, and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

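	/* regs->ip points just past the int3; step back to get the probe address. */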
	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since int3 and the debug trap disable irqs, and we
	 * clear IF while single-stepping, we cannot be preempted here.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up the registers to resume at
			 * another instruction, so we must skip the single-
			 * stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (kprobe_is_ss(kcb)) {
		p = kprobe_running();
		if ((unsigned long)p->ainsn.insn < regs->ip &&
		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
			/* Most probably this is the second int3 of the single-step */
			resume_singlestep(p, regs, kcb);
			kprobe_post_process(p, regs, kcb);
			return 1;
		}
	}	/* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen during single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-stepped
		 * caused a page fault. We reset the current kprobe so that
		 * the ip points back to the probe address, and allow the
		 * page fault handler to continue as a normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;

		/*
		 * If the IF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}