// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space Probes (UProbes) for x86
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
#include <asm/mmu_context.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_SI	0x08
#define UPROBE_FIX_RIP_DI	0x10
#define UPROBE_FIX_RIP_BX	0x20
#define UPROBE_FIX_RIP_MASK	\
	(UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

#define	UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
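
/*
 * Illustrative expansion (not from the original source): for row 0x40,
 * W(0x40, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1) evaluates to 0xffffUL
 * shifted left by (0x40 % 32) == 0 bits, so it fills bits 0-15 of the
 * u32 array element that covers opcodes 0x40-0x5f; the W(0x50, ...)
 * row that is OR'ed with it fills bits 16-31.
 */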

/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
 *	(so why do we support bound (62)? it's similar, and similarly unused...)
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * 07,17,1f - pop es/ss/ds
 *	Normally not used in userspace, but would execute if used.
 *	Can cause GP or stack exception if they try to load a wrong segment
 *	descriptor.  We hesitate to run them under single step since kernel's
 *	handling of userspace single-stepping (TF flag) is fragile.
 *	We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
 *	on the same grounds that they are never used.
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry. (Other "int N"
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
 *	Not supported since kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/* Good-instruction tables for 64-bit apps.
 *
 * Genuinely invalid opcodes:
 * 06,07 - formerly push/pop es
 * 0e - formerly push cs
 * 16,17 - formerly push/pop ss
 * 1e,1f - formerly push/pop ds
 * 27,2f,37,3f - formerly daa/das/aaa/aas
 * 60,61 - formerly pusha/popa
 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
 * 82 - formerly redundant encoding of Group1
 * 9a - formerly call seg:ofs
 * ce - formerly into
 * d4,d5 - formerly aam/aad
 * d6 - formerly undocumented salc
 * ea - formerly jmp seg:ofs
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry. (Other "int N"
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
 *	Not supported since kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
 */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/* Using this for both 64-bit and 32-bit apps.
 * Opcodes we don't support:
 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
 *	Also encodes tons of other system insns if mod=11.
 *	Some are in fact non-system: xend, xtest, rdtscp, maybe more
 * 0f 05 - syscall
 * 0f 06 - clts (CPL0 insn)
 * 0f 07 - sysret
 * 0f 08 - invd (CPL0 insn)
 * 0f 09 - wbinvd (CPL0 insn)
 * 0f 0b - ud2
 * 0f 30 - wrmsr (CPL0 insn) (so why is rdmsr allowed? it's also a CPL0 insn)
 * 0f 34 - sysenter
 * 0f 35 - sysexit
 * 0f 37 - getsec
 * 0f 78 - vmread (Intel VMX. CPL0 insn)
 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
 *	Note: with prefixes, these two opcodes are
 *	extrq/insertq/AVX512 convert vector ops.
 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
 *	{rd,wr}{fs,gs}base,{s,l,m}fence.
 *	Why? They are all user-executable.
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we may need to refine support for:
 *
 * 0f - 2-byte instructions: For many of these instructions, the validity
 * depends on the prefix and/or the reg field.  On such instructions, we
 * just consider the opcode combination valid if it corresponds to any
 * valid instruction.
 *
 * 8f - Group 1 - only reg = 0 is OK
 * c6-c7 - Group 11 - only reg = 0 is OK
 * d9-df - fpu insns with some illegal encodings
 * f2, f3 - repnz, repz prefixes.  These are also the first byte for
 * certain floating-point instructions, such as addsd.
 *
 * fe - Group 4 - only reg = 0 or 1 is OK
 * ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 * 0f - (floating-point?) prefetch instructions
 * 07, 17, 1f - pop es, pop ss, pop ds
 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 * but 64 and 65 (fs: and gs:) seem to be used, so we support them
 * 67 - addr16 prefix
 * ce - into
 * f0 - lock prefix
 */

/*
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 * in the different Groups and fpu instructions.
 */

static bool is_prefix_bad(struct insn *insn)
{
	insn_byte_t p;
	int i;

	for_each_insn_prefix(insn, i, p) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(p);
		switch (attr) {
		case INAT_MAKE_PREFIX(INAT_PFX_ES):
		case INAT_MAKE_PREFIX(INAT_PFX_CS):
		case INAT_MAKE_PREFIX(INAT_PFX_DS):
		case INAT_MAKE_PREFIX(INAT_PFX_SS):
		case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
			return true;
		}
	}
	return false;
}
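
/*
 * Illustrative note (not from the original source): e.g. "2e ff e0"
 * (cs: jmp *%rax) is rejected above, since the 0x2e prefix byte decodes
 * to the INAT_PFX_CS prefix attribute.
 */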

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
	u32 volatile *good_insns;
	int ret;

	ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
	if (ret < 0)
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}
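
/*
 * For example (illustrative): probing a lone "cc" (int3) fails here:
 * 0xcc is clear in both good-instruction bitmaps and the opcode is only
 * one byte long, so uprobe_init_insn() returns -ENOTSUPP.
 */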

#ifdef CONFIG_X86_64
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately. Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register. Set
 * defparam->fixups accordingly. (The contents of the scratch register
 * will be saved before we single-step the modified instruction,
 * and restored afterward).
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area. At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 * - There's always a modrm byte with bit layout "00 reg 101".
 * - There's never a SIB byte.
 * - The displacement is always 4 bytes.
 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
 *   has no effect on rip-relative mode. It doesn't make modrm byte
 *   with r/m=101 refer to register 1101 = R13.
 */
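/*
 * Example encoding (illustrative, not from the original source):
 * "8b 05 d2 04 00 00" is mov 0x4d2(%rip),%eax - modrm 0x05 has the
 * "00 reg 101" layout (mod=00, reg=000/ax, r/m=101), there is no SIB
 * byte, and the trailing 4 bytes are the displacement 0x4d2.
 */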
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode low numbered reg, not r8+.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}
	/*
	 * Similar treatment for VEX3/EVEX prefix.
	 * TODO: add XOP treatment when insn decoder supports them
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * vex2:     c5    rvvvvLpp   (has no b bit)
		 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
		 * evex:     62    rxbR00mm wvvvv1pp zllBVaaa
		 * Setting VEX3.b (setting because it has inverted meaning).
		 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
		 * is the 4th bit of MODRM.rm, and needs the same treatment.
		 * For VEX3-encoded insns, VEX3.x value has no effect in
		 * non-SIB encoding, the change is superfluous but harmless.
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative addressing
	 * via a scratch register.
	 *
	 * This is tricky since there are insns with modrm byte
	 * which also use registers not encoded in modrm byte:
	 * [i]div/[i]mul: implicitly use dx:ax
	 * shift ops: implicitly use cx
	 * cmpxchg: implicitly uses ax
	 * cmpxchg8/16b: implicitly uses dx:ax and bx:cx
	 *   Encoding: 0f c7/1 modrm
	 *   The code below thinks that reg=1 (cx), chooses si as scratch.
	 * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m.
	 *   First appeared in Haswell (BMI2 insn). It is vex-encoded.
	 *   Example where none of bx,cx,dx can be used as scratch reg:
	 *   c4 e2 63 f6 0d disp32   mulx disp32(%rip),%ebx,%ecx
	 * [v]pcmpistri: implicitly uses cx, xmm0
	 * [v]pcmpistrm: implicitly uses xmm0
	 * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0
	 * [v]pcmpestrm: implicitly uses ax, dx, xmm0
	 *   Evil SSE4.2 string comparison ops from hell.
	 * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination.
	 *   Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm.
	 *   Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi).
	 *   AMD says it has no 3-operand form (vex.vvvv must be 1111)
	 *   and that it can have only register operands, not mem
	 *   (its modrm byte must have mode=11).
	 *   If these restrictions are ever lifted,
	 *   we'll need code to prevent selection of di as scratch reg!
	 *
	 * Summary: I don't know any insns with modrm byte which
	 * use SI register implicitly. DI register is used only
	 * by one insn (maskmovq) and BX register is used
	 * only by one too (cmpxchg8b).
	 * BP is stack-segment based (may be a problem?).
	 * AX, DX, CX are off-limits (many implicit users).
	 * SP is unusable (it's stack pointer - think about "pop mem";
	 * also, rsp+disp32 needs sib encoding -> insn length change).
	 */

	reg = MODRM_REG(insn);	/* Fetch modrm.reg */
	reg2 = 0xff;		/* Fetch vex.vvvv */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * TODO: add XOP vvvv reading.
	 *
	 * vex.vvvv field is in bits 6-3, bits are inverted.
	 * But in 32-bit mode, high-order bit may be ignored.
	 * Therefore, let's consider only 3 low-order bits.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
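	/*
	 * Worked example (illustrative): for the mulx insn above,
	 * "c4 e2 63 f6 0d <disp32>", the third VEX byte
	 * (vex_prefix.bytes[2]) is 0x63, so
	 * ((0x63 >> 3) & 0x7) ^ 0x7 == 0x4 ^ 0x7 == 0x3: vvvv names bx,
	 * and bx must not be picked as the scratch register.
	 */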
	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
	 *
	 * Choose scratch reg. Order is important: must not select bx
	 * if we can use si (cmpxchg8b case!)
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
		/* TODO (paranoia): force maskmovq to not use di */
	} else {
		reg2 = 3;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Change modrm from "00 reg 101" to "10 reg reg2". Example:
	 * 89 05 disp32  mov %eax,disp32(%rip) becomes
	 * 89 86 disp32  mov %eax,disp32(%rsi)
	 */
	*cursor = 0x80 | (reg << 3) | reg2;
}

static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
		return &regs->si;
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
		return &regs->di;
	return &regs->bx;
}

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}
}

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(struct pt_regs *regs)
{
	/*
	 * Check registers for mode as in_xxx_syscall() does not apply here.
	 */
	return user_64bit_mode(regs) ? 8 : 4;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long(regs);

	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}

/*
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
 * We need to restore the contents of the scratch register
 * (FIX_RIP_reg).
 */
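/*
 * Worked example with made-up addresses (illustrative): a 2-byte insn
 * probed at vaddr 0x401000 is stepped in an XOL slot at 0x7f0000000000;
 * afterwards ip == 0x7f0000000002, and the FIX_IP correction
 * (vaddr - xol_vaddr) moves it back to 0x401002.  For "call *%rax" the
 * new ip (the call target) is already right, but the pushed return
 * address is xol_vaddr + ilen and FIX_CALL rewrites it to vaddr + ilen.
 */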
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))
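
/*
 * Illustrative expansion (not from the original source): in
 * check_jmp_cond() below, where DO(expr) is "return expr;",
 * COND(74, 75, XF(ZF)) expands to
 *	case 0x74: return ((!!(flags & X86_EFLAGS_ZF)) != 0);
 *	case 0x75: return ((!!(flags & X86_EFLAGS_ZF)) == 0);
 * i.e. the "je"/"jne" conditions.
 */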

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND


static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line. In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal; arch_uprobe_post_xol() won't even be called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (emulate_push_stack(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}
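
/*
 * Worked example (illustrative): emulating "75 10" (jne +0x10) at ip:
 * ilen == 2 and offs == 0x10, so new_ip = ip + 2; if ZF is clear the
 * jump is taken and ip becomes new_ip + 0x10, otherwise just new_ip.
 */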

static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;

	if (emulate_push_stack(regs, *src_ptr))
		return false;
	regs->ip += auprobe->push.ilen;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line. Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long(regs);
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails. We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed. So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}
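
/*
 * For example (illustrative): the 5-byte "e8 12 34 56 78" becomes
 * "e8 00 00 00 00" - a relative call with offset 0, i.e. a call to
 * the instruction that immediately follows it.
 */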

static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

static const struct uprobe_xol_ops push_xol_ops = {
	.emulate  = push_emulate_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	insn_byte_t p;
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
		break;
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		goto setup;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
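		/* e.g. (illustrative): "0f 84 <rel32>" is the near je;
		 * 0x84 - 0x10 == 0x74, the opcode of the short je. */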
		fallthrough;
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
	 * No one uses these insns, reject any branch insns with such prefix.
	 */
	for_each_insn_prefix(insn, i, p) {
		if (p == 0x66)
			return -ENOTSUPP;
	}

setup:
	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}

/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn), reg_offset = 0;

	if (opc1 < 0x50 || opc1 > 0x57)
		return -ENOSYS;

	if (insn->length > 2)
		return -ENOSYS;
	if (insn->length == 2) {
		/* only support rex_prefix 0x41 (x64 only) */
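		/* e.g. (illustrative): "41 50" is push %r8 - the REX.B
		 * prefix 0x41 extends opcode 0x50 (push %rax) to r8. */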
#ifdef CONFIG_X86_64
		if (insn->rex_prefix.nbytes != 1 ||
		    insn->rex_prefix.bytes[0] != 0x41)
			return -ENOSYS;

		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, r8);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, r9);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, r10);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, r11);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, r12);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, r13);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, r14);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, r15);
			break;
		}
#else
		return -ENOSYS;
#endif
	} else {
		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, ax);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, cx);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, dx);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, bx);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, sp);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, bp);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, si);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, di);
			break;
		}
	}

	auprobe->push.reg_offset = reg_offset;
	auprobe->push.ilen = insn->length;
	auprobe->ops = &push_xol_ops;
	return 0;
}

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a negative error code on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate defparam->fixups accordingly.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		fallthrough;
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address. It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF. We need
	 * to examine the opcode to make it right.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal. Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(regs), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft)) {
		if (shstk_update_last_frame(trampoline_vaddr)) {
			force_sig(SIGSEGV);
			return -1;
		}
		return orig_ret_vaddr;
	}

	if (nleft != rasize) {
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
		       current->pid, regs->sp, regs->ip);

		force_sig(SIGSEGV);
	}

	return -1;
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}