// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space Probes (UProbes) for x86
 *
 * Copyright (C) IBM Corporation, 2008-2011
 */

/* Post-execution fixups. */

#define OPCODE1(insn)	((insn)->opcode.bytes[0])
#define OPCODE2(insn)	((insn)->opcode.bytes[1])
#define OPCODE3(insn)	((insn)->opcode.bytes[2])
#define MODRM_REG(insn)	X86_MODRM_REG((insn)->modrm.value)
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * gcc think only *(unsigned long*) is used.
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * 07,17,1f - pop es/ss/ds
 *	Normally not used in userspace. Not supported because the kernel's
 *	handling of userspace single-stepping (TF flag) is fragile.
 *	We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
 *	on the same grounds that they are never used.
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry. (Other "int N"s
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3.)
 *	Not supported since kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad.
 */
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
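/*
 * For context, the W() macro used by these tables packs sixteen 0/1 flags
 * into half of a 32-bit word; rows alternate '|' and ',' separators because
 * two consecutive rows share one u32 array element. A sketch of its likely
 * shape (close to, but not guaranteed verbatim from, the kernel source):
 */
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))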
/* Good-instruction tables for 64-bit apps.
 *
 * Genuinely invalid opcodes:
 * 06,07 - formerly push/pop es
 * 0e - formerly push cs
 * 16,17 - formerly push/pop ss
 * 1e,1f - formerly push/pop ds
 * 27,2f,37,3f - formerly daa/das/aaa/aas
 * 60,61 - formerly pusha/popa
 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
 * 82 - formerly redundant encoding of Group1
 * 9a - formerly call seg:ofs
 * ce - formerly into
 * d4,d5 - formerly aam/aad
 * d6 - formerly undocumented salc
 * ea - formerly jmp seg:ofs
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry. (Other "int N"s
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3.)
 *	Not supported since kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad.
 */
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
/* Using this for both 64-bit and 32-bit apps.
 * Opcodes we don't support:
 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
 *	Some are in fact non-system: xend, xtest, rdtscp, maybe more
 * 0f 05 - syscall
 * 0f 06 - clts (CPL0 insn)
 * 0f 07 - sysret
 * 0f 08 - invd (CPL0 insn)
 * 0f 09 - wbinvd (CPL0 insn)
 * 0f 0b - ud2
 * 0f 30 - wrmsr (CPL0 insn) (then why is rdmsr allowed? it's a CPL0 insn too)
 * 0f 34 - sysenter
 * 0f 35 - sysexit
 * 0f 37 - getsec
 * 0f 78 - vmread (Intel VMX. CPL0 insn)
 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
 *	sfence,lfence,mfence - currently rejected by the table below.
 * Why? They are all user-executable.
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
/*
 * opcodes we may need to refine support for:
 *
 * 0f - 2-byte instructions: For many of these instructions, the validity
 *	depends on the prefix and/or the reg field of the modrm byte.
 * 8f - Group 1 - only reg = 0 is OK
 * c6-c7 - Group 11 - only reg = 0 is OK
 * d9-df - fpu insns with some illegal encodings
 * f2, f3 - repnz, repz prefixes. These are also the first byte for
 *	certain floating-point instructions, such as addsd.
 * fe - Group 4 - only reg = 0 or 1 is OK
 * ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 * 0f - (floating-point?) prefetch instructions
 * 07, 17, 1f - pop es, pop ss, pop ds
 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	legal, but rarely (if ever) useful in userspace
 * 67 - addr16 prefix
 * ce - into
 * f0 - lock prefix
 *
 * Remaining work:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 *	in the different Groups and fpu instructions (a sketch of such a
 *	check follows this comment).
 */
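/*
 * A hypothetical helper illustrating that last point (a sketch for
 * exposition only, not the kernel's actual filtering code): examine the
 * modrm reg field and reject Group encodings with undefined members.
 */
static bool modrm_group_ok(struct insn *insn)
{
	switch (OPCODE1(insn)) {
	case 0x8f:			/* Group 1a: only /0 (pop r/m) */
	case 0xc6: case 0xc7:		/* Group 11: only /0 (mov imm) */
		return MODRM_REG(insn) == 0;
	case 0xfe:			/* Group 4: only /0 (inc), /1 (dec) */
		return MODRM_REG(insn) <= 1;
	case 0xff:			/* Group 5: /7 is undefined */
		return MODRM_REG(insn) != 7;
	}
	return true;			/* not a Group opcode we check */
}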
/* from uprobe_init_insn(): decode, then vet the opcode against the tables */
	ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
	if (ret < 0)
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return -ENOTSUPP;

	/* ... check OPCODE1() against good_insns; for two-byte opcodes
	 * also check OPCODE2() against good_2byte_insns (sketched below): */
	if (insn->opcode.nbytes == 2) {
		/* ... */
	}
	return -ENOTSUPP;
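/*
 * How the lookup proceeds from here, condensed (assuming 'good_insns'
 * points at good_insns_64 or good_insns_32 depending on the task's mode;
 * a sketch of the tail of uprobe_init_insn(), not a verbatim copy):
 */
	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns)) {
		if (insn->opcode.nbytes == 2) {
			if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
				return 0;
		} else {
			return 0;
		}
	}
	return -ENOTSUPP;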
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately. Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register. Set
 * defparam->fixups accordingly. (The contents of the scratch register
 * will be saved before we single-step the modified instruction,
 * and restored afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area. At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * destructive.
 *
 * Some useful facts about rip-relative instructions:
 *
 * - There's always a modrm byte with bit layout "00 reg 101".
 * - There's never a SIB byte.
 * - The displacement is always 4 bytes.
 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
 *   has no effect on rip-relative mode. It doesn't make modrm byte
 *   with r/m=101 refer to register bp/r13.
 */
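/*
 * The rewrite itself is a one-byte change: the modrm byte goes from
 * "00 reg 101" (rip-relative) to "10 reg base" (base register + disp32).
 * A minimal sketch, assuming 'reg' holds MODRM_REG(insn) and 'reg2' the
 * chosen scratch register number:
 *
 *	89 05 disp32	mov %eax, disp32(%rip)	becomes
 *	89 86 disp32	mov %eax, disp32(%rsi)
 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	*cursor = 0x80 | (reg << 3) | reg2;	/* mod=10, reg, r/m=reg2 */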
/* from riprel_analyze(): */

	/* Clear REX.b bit (extension of MODRM.rm): encode low reg, not r8+. */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
		 * is the 4th bit of MODRM.rm, and needs the same treatment.
		 * For VEX3-encoded insns, VEX3.x value has no effect in
		 * non-SIB encoding, the change is superfluous but harmless.
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative addressing
	 * via a scratch register. Tricky: some insns use registers that are
	 * not encoded in the modrm byte, e.g.:
	 * cmpxchg8b: implicitly uses dx:ax and bx:cx.
	 *	Encoding: 0f c7/1 modrm
	 *	The code below thinks that reg=1 (cx), chooses si as scratch.
	 * mulx: implicitly uses dx.
	 *	First appeared in Haswell (BMI2 insn). It is vex-encoded.
	 * maskmovq: implicitly uses (ds:rdi) as destination.
	 *	Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm.
	 *	Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi).
	 *	AMD says it has no 3-operand form (vex.vvvv must be 1111).
	 *
	 * BP is stack-segment based (may be a problem?).
	 * AX, DX, CX are off-limits (many implicit users).
	 * SP is unusable (it's stack pointer - think about "pop mem";
	 * also, rsp+disp32 needs sib encoding -> insn length change).
	 */

	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * vex.vvvv field is in bits 6-3, bits are inverted.
	 * But in 32-bit mode, high-order bit may be ignored.
	 * Therefore, let's consider only 3 low-order bits.
	 */
	/* ... choose si, then di, then bx as the scratch register: */
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	/* ... */
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
	/* ... */
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;

	cursor = auprobe->insn + insn_offset_modrm(insn);
	/* ... rewrite the modrm byte as sketched above ... */

/* scratch_reg(): map the chosen fixup flag back to the pt_regs slot */
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
		return &regs->si;
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
		return &regs->di;
	return &regs->bx;
/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
/* riprel_pre_xol(): */
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}

/* riprel_post_xol(): restore the scratch register */
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit.
 */
/* emulate_push_stack(): push a value onto the user stack */
	unsigned long new_sp = regs->sp - sizeof_long(regs);
	/* ... write to new_sp; see the full sketch below ... */
		return -EFAULT;

	regs->sp = new_sp;
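/*
 * The elided middle of emulate_push_stack() is the user-space store;
 * modulo details, the complete helper is this sketch:
 */
static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long(regs);

	/* store the pushed value just below the old user stack pointer */
	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}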
/*
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction. We
 * need to make it the address following the original instruction.
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
 * We need to restore the contents of the scratch register.
 */
/* default_post_xol_op(): */
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;
/* branch_is_call(): */
	return auprobe->branch.opc1 == 0xe8;

/* check_jmp_cond(): evaluate a jcc condition against the saved flags */
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	/* ... one case per condition code; see the sketch below ... */

/* branch_emulate_op(): */
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;
	/*
	 * For a call: if it fails we execute this (modified, see
	 * branch_clear_offset) insn out-of-line. In the likely case
	 * that traps and the task restarts or dies, so ->post_xol()
	 * is never reached.
	 * But there is corner case, see the comment in ->post_xol().
	 */
	/* ... */
	regs->ip = new_ip + offs;
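/*
 * The check_jmp_cond() switch evaluates one condition per opcode straight
 * from the saved flags image. A hand-expanded sketch of a few cases (the
 * kernel generates these with a macro; 'jcc_taken' is a hypothetical name):
 */
static bool jcc_taken(u8 opc1, unsigned long flags)
{
	switch (opc1) {
	case 0x70: return  (flags & X86_EFLAGS_OF);	/* jo  */
	case 0x71: return !(flags & X86_EFLAGS_OF);	/* jno */
	case 0x72: return  (flags & X86_EFLAGS_CF);	/* jb  */
	case 0x73: return !(flags & X86_EFLAGS_CF);	/* jae */
	case 0x74: return  (flags & X86_EFLAGS_ZF);	/* je  */
	case 0x75: return !(flags & X86_EFLAGS_ZF);	/* jne */
	default:   return false;	/* remaining conditions omitted */
	}
}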
/* push_emulate_op(): push the source register, then advance past the insn */
	unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;

	if (emulate_push_stack(regs, *src_ptr))
		return false;
	regs->ip += auprobe->push.ilen;
679 * "call" insn was executed out-of-line. Just restore ->sp and restart. in branch_post_xol_op()
680 * We could also restore ->ip and try to call branch_emulate_op() again. in branch_post_xol_op()
682 regs->sp += sizeof_long(regs); in branch_post_xol_op()
683 return -ERESTART; in branch_post_xol_op()
/* branch_clear_offset(): */
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails. We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed. So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
/* branch_setup_xol_ops(): */
	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		fallthrough;
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
	 * No one uses these insns; reject any branch insn with such a prefix.
	 */
	/* ... scan the prefix bytes for 0x66: */
			return -ENOTSUPP;

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
/* push_setup_xol_ops(): */
	if (opc1 < 0x50 || opc1 > 0x57)
		return -ENOSYS;

	if (insn->length > 2)
		return -ENOSYS;
	if (insn->length == 2) {
		/* only support rex_prefix 0x41 (x64 only) */
		if (insn->rex_prefix.nbytes != 1 ||
		    insn->rex_prefix.bytes[0] != 0x41)
			return -ENOSYS;
		/* ... map opc1 to the r8..r15 pt_regs offsets ... */
	} else {
		/* ... map opc1 to the ax..di pt_regs offsets, see below ... */
	}
	/* on 32-bit kernels the REX-prefixed form is rejected outright: */
		return -ENOSYS;

	auprobe->push.reg_offset = reg_offset;
	auprobe->push.ilen = insn->length;

	auprobe->ops = &push_xol_ops;
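/*
 * The elided switches map opc1 (0x50..0x57: push ax/cx/dx/bx/sp/bp/si/di,
 * or the r8..r15 forms when REX.B is set) to a pt_regs field offset.
 * A trimmed sketch, assuming the kernel's offsetof() scheme:
 */
	switch (opc1) {
	case 0x50: reg_offset = offsetof(struct pt_regs, ax); break;
	case 0x51: reg_offset = offsetof(struct pt_regs, cx); break;
	case 0x52: reg_offset = offsetof(struct pt_regs, dx); break;
	case 0x53: reg_offset = offsetof(struct pt_regs, bx); break;
	/* ... 0x54-0x57: sp, bp, si, di; with REX.B: r8..r15 ... */
	}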
/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * Return 0 on success or a -ve number on error.
 */
/* arch_uprobe_analyze_insn(): */
	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate defparam->fixups accordingly.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	/* ... indirect call/jmp (0xff) decided via MODRM_REG() ... */
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 */
/* arch_uprobe_pre_xol(): */
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
/*
 * If the xol insn itself traps and generates a signal, then anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
/* arch_uprobe_xol_was_trapped(): */
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;
/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
/* arch_uprobe_post_xol(): */
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}

	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
/* arch_uprobe_exception_notify(): */
	struct pt_regs *regs = args->regs;
/* arch_uprobe_abort_xol(): undo what arch_uprobe_pre_xol() did */
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
/* __skip_sstep(): */
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);

/* arch_uprobe_skip_sstep(): after emulating, mimic the single-step trap */
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
/* arch_uretprobe_hijack_return_addr(): */
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;
	/* ... */
	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	/* ... on failure: */
		return -1;
	/* ... */
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
			current->pid, regs->sp, regs->ip);
	/* ... */
	return -1;
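/*
 * Putting those fragments together: a condensed sketch of the hijack
 * sequence (shadow-stack handling omitted; not a verbatim copy):
 */
	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* already points at the trampoline? nothing to do */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
		       current->pid, regs->sp, regs->ip);
		force_sig(SIGSEGV);
	}
	return -1;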
/* arch_uretprobe_is_alive(): */
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;