// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space Probes (UProbes) for x86
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
#include <asm/mmu_context.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_SI	0x08
#define UPROBE_FIX_RIP_DI	0x10
#define UPROBE_FIX_RIP_BX	0x20
#define UPROBE_FIX_RIP_MASK	\
	(UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

#define UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
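
/*
 * For instance, W(0x00, ...) builds a 16-bit mask in bits 0-15 of a word
 * (0x00 % 32 == 0) while W(0x10, ...) lands in bits 16-31, which is why
 * the table initializers below join rows pairwise with '|' and separate
 * the pairs with ','. The whole 256-opcode map thus fits in eight u32s,
 * and checking one opcode byte is a single test_bit(opcode, table)
 * lookup (see uprobe_init_insn() below).
 */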

/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
 *	(why do we support bound (62) then? it's similar, and similarly unused...)
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * 07,17,1f - pop es/ss/ds
 *	Normally not used in userspace, but would execute if used.
 *	Can cause GP or stack exception if it tries to load a wrong segment
 *	descriptor. We hesitate to run them under single step since the
 *	kernel's handling of userspace single-stepping (TF flag) is fragile.
 *	We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
 *	on the same grounds that they are never used.
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry. (Other "int N"
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
 *	Not supported since the kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments
 *	are bad.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/* Good-instruction tables for 64-bit apps.
 *
 * Genuinely invalid opcodes:
 * 06,07 - formerly push/pop es
 * 0e - formerly push cs
 * 16,17 - formerly push/pop ss
 * 1e,1f - formerly push/pop ds
 * 27,2f,37,3f - formerly daa/das/aaa/aas
 * 60,61 - formerly pusha/popa
 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
 * 82 - formerly redundant encoding of Group1
 * 9a - formerly call seg:ofs
 * ce - formerly into
 * d4,d5 - formerly aam/aad
 * d6 - formerly undocumented salc
 * ea - formerly jmp seg:ofs
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry. (Other "int N"
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
 *	Not supported since the kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments
 *	are bad.
 */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/* Using this for both 64-bit and 32-bit apps.
 * Opcodes we don't support:
 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
 *	Also encodes tons of other system insns if mod=11.
 *	Some are in fact non-system: xend, xtest, rdtscp, maybe more
 * 0f 05 - syscall
 * 0f 06 - clts (CPL0 insn)
 * 0f 07 - sysret
 * 0f 08 - invd (CPL0 insn)
 * 0f 09 - wbinvd (CPL0 insn)
 * 0f 0b - ud2
 * 0f 30 - wrmsr (CPL0 insn) (then why is rdmsr allowed? it's also a CPL0 insn)
 * 0f 34 - sysenter
 * 0f 35 - sysexit
 * 0f 37 - getsec
 * 0f 78 - vmread (Intel VMX. CPL0 insn)
 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
 *	Note: with prefixes, these two opcodes are
 *	extrq/insertq/AVX512 convert vector ops.
 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
 *	{rd,wr}{fs,gs}base,{s,l,m}fence.
 *	Why? They are all user-executable.
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we may need to refine support for:
 *
 * 0f - 2-byte instructions: For many of these instructions, the validity
 * depends on the prefix and/or the reg field.  On such instructions, we
 * just consider the opcode combination valid if it corresponds to any
 * valid instruction.
 *
 * 8f - Group 1 - only reg = 0 is OK
 * c6-c7 - Group 11 - only reg = 0 is OK
 * d9-df - fpu insns with some illegal encodings
 * f2, f3 - repnz, repz prefixes.  These are also the first byte for
 * certain floating-point instructions, such as addsd.
 *
 * fe - Group 4 - only reg = 0 or 1 is OK
 * ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 * 0f - (floating-point?) prefetch instructions
 * 07, 17, 1f - pop es, pop ss, pop ds
 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 * but 64 and 65 (fs: and gs:) seem to be used, so we support them
 * 67 - addr16 prefix
 * ce - into
 * f0 - lock prefix
 */

/*
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 * in the different Groups and fpu instructions.
 */

static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
		switch (attr) {
		case INAT_MAKE_PREFIX(INAT_PFX_ES):
		case INAT_MAKE_PREFIX(INAT_PFX_CS):
		case INAT_MAKE_PREFIX(INAT_PFX_DS):
		case INAT_MAKE_PREFIX(INAT_PFX_SS):
		case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
			return true;
		}
	}
	return false;
}

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (!insn_complete(insn))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}
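
/*
 * For example, probing "push %r15" (41 57) in a 64-bit task: the REX
 * prefix is not a legacy prefix, so is_prefix_bad() passes, and
 * OPCODE1() == 0x57 is set in good_insns_64, so uprobe_init_insn()
 * returns 0. Probing "int3" (cc) instead finds bit 0xcc clear in both
 * one-byte tables and fails with -ENOTSUPP.
 */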

#ifdef CONFIG_X86_64
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * defparam->fixups accordingly.  (The contents of the scratch register
 * will be saved before we single-step the modified instruction,
 * and restored afterward).
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 * - There's always a modrm byte with bit layout "00 reg 101".
 * - There's never a SIB byte.
 * - The displacement is always 4 bytes.
 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
 *   has no effect on rip-relative mode. It doesn't make modrm byte
 *   with r/m=101 refer to register 1101 = R13.
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode a low-numbered reg, not r8+.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}
	/*
	 * Similar treatment for VEX3/EVEX prefix.
	 * TODO: add XOP treatment when insn decoder supports them
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * vex2:     c5    rvvvvLpp   (has no b bit)
		 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
		 * evex:     62    rxbR00mm wvvvv1pp zllBVaaa
		 * Setting VEX3.b (setting because it has inverted meaning).
		 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
		 * is the 4th bit of MODRM.rm, and needs the same treatment.
		 * For VEX3-encoded insns, VEX3.x value has no effect in
		 * non-SIB encoding, the change is superfluous but harmless.
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative addressing
	 * via a scratch register.
	 *
	 * This is tricky since there are insns with modrm byte
	 * which also use registers not encoded in modrm byte:
	 * [i]div/[i]mul: implicitly use dx:ax
	 * shift ops: implicitly use cx
	 * cmpxchg: implicitly uses ax
	 * cmpxchg8/16b: implicitly uses dx:ax and bx:cx
	 *   Encoding: 0f c7/1 modrm
	 *   The code below thinks that reg=1 (cx), chooses si as scratch.
	 * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m.
	 *   First appeared in Haswell (BMI2 insn). It is vex-encoded.
	 *   Example where none of bx,cx,dx can be used as scratch reg:
	 *   c4 e2 63 f6 0d disp32   mulx disp32(%rip),%ebx,%ecx
	 * [v]pcmpistri: implicitly uses cx, xmm0
	 * [v]pcmpistrm: implicitly uses xmm0
	 * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0
	 * [v]pcmpestrm: implicitly uses ax, dx, xmm0
	 *   Evil SSE4.2 string comparison ops from hell.
	 * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination.
	 *   Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm.
	 *   Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi).
	 *   AMD says it has no 3-operand form (vex.vvvv must be 1111)
	 *   and that it can have only register operands, not mem
	 *   (its modrm byte must have mode=11).
	 *   If these restrictions are ever lifted,
	 *   we'll need code to prevent selection of di as scratch reg!
	 *
	 * Summary: I don't know any insns with modrm byte which
	 * use SI register implicitly. DI register is used only
	 * by one insn (maskmovq) and BX register is used
	 * only by one too (cmpxchg8b).
	 * BP is stack-segment based (may be a problem?).
	 * AX, DX, CX are off-limits (many implicit users).
	 * SP is unusable (it's the stack pointer - think about "pop mem";
	 * also, rsp+disp32 needs sib encoding -> insn length change).
	 */

	reg = MODRM_REG(insn);	/* Fetch modrm.reg */
	reg2 = 0xff;		/* Fetch vex.vvvv */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * TODO: add XOP vvvv reading.
	 *
	 * vex.vvvv field is in bits 6-3, bits are inverted.
	 * But in 32-bit mode, the high-order bit may be ignored.
	 * Therefore, let's consider only 3 low-order bits.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
	 *
	 * Choose scratch reg. Order is important: must not select bx
	 * if we can use si (cmpxchg8b case!)
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
		/* TODO (paranoia): force maskmovq to not use di */
	} else {
		reg2 = 3;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Change modrm from "00 reg 101" to "10 reg reg2".  Example:
	 * 89 05 disp32	 mov %eax,disp32(%rip) becomes
	 * 89 86 disp32	 mov %eax,disp32(%rsi)
	 */
	*cursor = 0x80 | (reg << 3) | reg2;
}

static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
		return &regs->si;
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
		return &regs->di;
	return &regs->bx;
}

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}
}

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */
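
/*
 * A worked example of the rip-relative machinery above: suppose we probe
 *	89 05 <disp32>		mov %eax,disp32(%rip)
 * at vaddr. riprel_analyze() rewrites the XOL copy to
 *	89 86 <disp32>		mov %eax,disp32(%rsi)
 * and sets UPROBE_FIX_RIP_SI. riprel_pre_xol() saves %rsi and loads it
 * with vaddr + ilen, the address of the next original insn, so the copy
 * stores to vaddr + ilen + disp32 -- exactly the target the original
 * rip-relative insn would have used. riprel_post_xol() then restores
 * the application's %rsi.
 */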

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(void)
{
	return in_ia32_syscall() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long();

	if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}

/*
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
 * We need to restore the contents of the scratch register
 * (FIX_RIP_reg).
 */
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(); /* Pop incorrect return address */
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}
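
/*
 * Concretely, for a probed insn at vaddr whose copy was stepped at
 * xol_vaddr, the CPU leaves ip == xol_vaddr + ilen; adding
 * correction == vaddr - xol_vaddr yields vaddr + ilen, the insn
 * following the original. Likewise, the return address pushed by a
 * stepped "call" is xol_vaddr + ilen, so FIX_CALL pops it and pushes
 * vaddr + ilen instead.
 */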

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND
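
/*
 * With DO(expr) defined as "return expr;", COND(74, 75, XF(ZF)) expands
 * inside check_jmp_cond() to
 *	case 0x74: return (XF(ZF)) != 0;	(je: jump if ZF set)
 *	case 0x75: return (XF(ZF)) == 0;	(jne: jump if ZF clear)
 * whereas the DO() used in is_cond_jmp_opcode() simply returns true for
 * every opcode in the list.
 */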

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset()) insn out-of-line. In the likely case
		 * this should trigger the trap, and the probed application
		 * should die, or it should restart the same insn after it
		 * handles the signal; arch_uprobe_post_xol() won't even be
		 * called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (emulate_push_stack(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}

static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;

	if (emulate_push_stack(regs, *src_ptr))
		return false;
	regs->ip += auprobe->push.ilen;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line. Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails. We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed. So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

static const struct uprobe_xol_ops push_xol_ops = {
	.emulate  = push_emulate_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		/* fall through */
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores the 66
	 * prefix. No one uses these insns; reject any branch insn with such
	 * a prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
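
/*
 * E.g. "jne .+0x10" (75 0e) is recorded as opc1 == 0x75, ilen == 2,
 * offs == 0x0e: branch_emulate_op() advances ip by 2 and, if ZF is
 * clear, by 0x0e more, so no out-of-line step is needed. A relative
 * "call" (e8) keeps its immediate only in ->offs; the immediate in the
 * XOL copy was zeroed by branch_clear_offset() above.
 */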

/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn), reg_offset = 0;

	if (opc1 < 0x50 || opc1 > 0x57)
		return -ENOSYS;

	if (insn->length > 2)
		return -ENOSYS;
	if (insn->length == 2) {
		/* only support rex_prefix 0x41 (x64 only) */
#ifdef CONFIG_X86_64
		if (insn->rex_prefix.nbytes != 1 ||
		    insn->rex_prefix.bytes[0] != 0x41)
			return -ENOSYS;

		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, r8);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, r9);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, r10);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, r11);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, r12);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, r13);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, r14);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, r15);
			break;
		}
#else
		return -ENOSYS;
#endif
	} else {
		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, ax);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, cx);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, dx);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, bx);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, sp);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, bp);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, si);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, di);
			break;
		}
	}

	auprobe->push.reg_offset = reg_offset;
	auprobe->push.ilen = insn->length;
	auprobe->ops = &push_xol_ops;
	return 0;
}
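
/*
 * E.g. "push %r15" (41 57): the mandatory 0x41 REX prefix maps opc1
 * 0x57 to the r15 slot of struct pt_regs. push_emulate_op() then
 * copies regs->r15 to the new top of stack via emulate_push_stack()
 * and advances ip by ->ilen (here 2), so the insn is never stepped.
 */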

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate defparam->fixups accordingly.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:		/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:		/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		/* fall through */
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}
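
/*
 * The overall sequence: the int3 at the probepoint enters via
 * arch_uprobe_exception_notify() below, and if ->emulate() did not
 * dispose of the insn (arch_uprobe_skip_sstep()), arch_uprobe_pre_xol()
 * points ip at the XOL slot and sets TF. The resulting #DB lands in
 * arch_uprobe_post_xol(), which applies the fixups above and clears TF
 * if we were the ones who set it.
 */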

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address. It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr;
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal
 * to UPROBE_TRAP_NR (== UINT_MAX) set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF. We need
	 * to examine the opcode to make it right.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped
 * or the thread has a fatal signal. Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether the address has already been hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
		       current->pid, regs->sp, regs->ip);

		force_sig(SIGSEGV);
	}

	return -1;
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}