/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimization */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe has been optimized, the original bytes may have been
	 * overwritten by the jump destination address. In that case, the
	 * original bytes must be recovered from the op->optinsn.copied_insn
	 * buffer.
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs,
		       RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}
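
/*
 * Illustrative example (not from the original file): with
 * RELATIVEJUMP_SIZE == 5 and RELATIVE_ADDR_SIZE == 4, optimizing a probe
 * turns, say, a "push %rbp; mov %rsp,%rbp" prologue into a single rel32
 * jump to the detour buffer:
 *
 *	before:	55 48 89 e5 ...		push %rbp; mov %rsp,%rbp; ...
 *	after:	e9 xx xx xx xx ...	jmp <op->optinsn.insn> (rel32)
 *
 * A decoder landing on any of those five bytes would read garbage, so
 * __recover_optprobed_insn() above rebuilds the original view from
 * kp->opcode (the first byte) and op->optinsn.copied_insn (the
 * following four bytes).
 */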

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movabs $val, %rdi */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* movl $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

asm (
	".global optprobe_template_entry\n"
	"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rsi\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Move flags to rsp */
	"	movq 144(%rsp), %rdx\n"
	"	movq %rdx, 152(%rsp)\n"
	RESTORE_REGS_STRING
	/* Skip flags entry */
	"	addq $8, %rsp\n"
	"	popfq\n"
#else /* CONFIG_X86_32 */
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %edx\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	RESTORE_REGS_STRING
	"	addl $4, %esp\n"	/* skip cs */
	"	popf\n"
#endif
	".global optprobe_template_end\n"
	"optprobe_template_end:\n");

#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)

/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimization */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

static int copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len);
		if (!ret || !can_boost(dest + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is an indirect jump */
static int insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;
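
	/*
	 * Only instructions with an IP-relative target can reach into the
	 * range: loop/jcxz and jmp rel8/rel32 (0xe0-0xe3, 0xe9, 0xeb),
	 * near jcc (0x0f 0x8x), and short jcc (0x7x). Everything else is
	 * rejected before the target is even computed.
	 */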
	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr <  (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem has put a breakpoint here */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free the optimized instruction slot */
static void
__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
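
/*
 * Sketch of the detour buffer assembled by arch_prepare_optimized_kprobe()
 * below (offsets are the TMPL_*_IDX values derived from the template):
 *
 *	buf + 0			template head: save regs, build pt_regs
 *	buf + TMPL_MOVE_IDX	mov $op, %rdi/%eax  (synthesize_set_arg1)
 *	buf + TMPL_CALL_IDX	call optimized_callback (synthesize_relcall)
 *	buf + TMPL_END_IDX	relocated copy of the probed instructions
 *	buf + TMPL_END_IDX
 *	    + optinsn.size	jmp back to kp->addr + size (synthesize_reljump)
 */
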
324 */ 325 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, 326 struct kprobe *__unused) 327 { 328 u8 *buf; 329 int ret; 330 long rel; 331 332 if (!can_optimize((unsigned long)op->kp.addr)) 333 return -EILSEQ; 334 335 op->optinsn.insn = get_optinsn_slot(); 336 if (!op->optinsn.insn) 337 return -ENOMEM; 338 339 /* 340 * Verify if the address gap is in 2GB range, because this uses 341 * a relative jump. 342 */ 343 rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; 344 if (abs(rel) > 0x7fffffff) { 345 __arch_remove_optimized_kprobe(op, 0); 346 return -ERANGE; 347 } 348 349 buf = (u8 *)op->optinsn.insn; 350 351 /* Copy instructions into the out-of-line buffer */ 352 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); 353 if (ret < 0) { 354 __arch_remove_optimized_kprobe(op, 0); 355 return ret; 356 } 357 op->optinsn.size = ret; 358 359 /* Copy arch-dep-instance from template */ 360 memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); 361 362 /* Set probe information */ 363 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); 364 365 /* Set probe function call */ 366 synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); 367 368 /* Set returning jmp instruction at the tail of out-of-line buffer */ 369 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, 370 (u8 *)op->kp.addr + op->optinsn.size); 371 372 flush_icache_range((unsigned long) buf, 373 (unsigned long) buf + TMPL_END_IDX + 374 op->optinsn.size + RELATIVEJUMP_SIZE); 375 return 0; 376 } 377 378 /* 379 * Replace breakpoints (int3) with relative jumps. 380 * Caller must call with locking kprobe_mutex and text_mutex. 381 */ 382 void arch_optimize_kprobes(struct list_head *oplist) 383 { 384 struct optimized_kprobe *op, *tmp; 385 u8 insn_buf[RELATIVEJUMP_SIZE]; 386 387 list_for_each_entry_safe(op, tmp, oplist, list) { 388 s32 rel = (s32)((long)op->optinsn.insn - 389 ((long)op->kp.addr + RELATIVEJUMP_SIZE)); 390 391 WARN_ON(kprobe_disabled(&op->kp)); 392 393 /* Backup instructions which will be replaced by jump address */ 394 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, 395 RELATIVE_ADDR_SIZE); 396 397 insn_buf[0] = RELATIVEJUMP_OPCODE; 398 *(s32 *)(&insn_buf[1]) = rel; 399 400 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, 401 op->optinsn.insn); 402 403 list_del_init(&op->list); 404 } 405 } 406 407 /* Replace a relative jump with a breakpoint (int3). */ 408 void arch_unoptimize_kprobe(struct optimized_kprobe *op) 409 { 410 u8 insn_buf[RELATIVEJUMP_SIZE]; 411 412 /* Set int3 to first byte for kprobes */ 413 insn_buf[0] = BREAKPOINT_INSTRUCTION; 414 memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); 415 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, 416 op->optinsn.insn); 417 } 418 419 /* 420 * Recover original instructions and breakpoints from relative jumps. 421 * Caller must call with locking kprobe_mutex. 422 */ 423 extern void arch_unoptimize_kprobes(struct list_head *oplist, 424 struct list_head *done_list) 425 { 426 struct optimized_kprobe *op, *tmp; 427 428 list_for_each_entry_safe(op, tmp, oplist, list) { 429 arch_unoptimize_kprobe(op); 430 list_move(&op->list, done_list); 431 } 432 } 433 434 int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) 435 { 436 struct optimized_kprobe *op; 437 438 if (p->flags & KPROBE_FLAG_OPTIMIZED) { 439 /* This kprobe is really able to run optimized path. 
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through the copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);