/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/alternative.h>
#include <asm/ftrace.h>


/* Long is fine, even if it is only 4 bytes ;-) */
static long *ftrace_nop;

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};


/* Signed 32-bit displacement from @ip to @addr for a relative call. */
static int notrace ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

/* The MCOUNT_INSN_SIZE-byte nop that replaces a patched-out mcount call. */
notrace unsigned char *ftrace_nop_replace(void)
{
	return (char *)ftrace_nop;
}

notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	/*
	 * 0xe8 is the opcode of a relative near call; its offset is
	 * taken from the end of the instruction, hence ip + MCOUNT_INSN_SIZE.
	 */
	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned replaced;
	unsigned old = *(unsigned *)old_code; /* 4 bytes */
	unsigned new = *(unsigned *)new_code; /* 4 bytes */
	unsigned char newch = new_code[4];
	int faulted = 0;

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as
	 * code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 *
	 * The cmpxchg compares the first four bytes at @ip with
	 * old_code and, only if they still match, stores the first
	 * four bytes of new_code; the movb then writes the fifth
	 * byte.  A fault on the cmpxchg is handled by the fixup at
	 * 3: and reported through 'faulted'; a concurrent change of
	 * the code shows up as replaced != old below.
	 */
	asm volatile (
		"1: lock\n"
		"   cmpxchg %3, (%2)\n"
		"   jnz 2f\n"
		"   movb %b4, 4(%2)\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3: movl $1, %0\n"
		"   jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		: "=r"(faulted), "=a"(replaced)
		: "r"(ip), "r"(new), "c"(newch),
		  "0"(faulted), "a"(old)
		: "memory");
	sync_core();

	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}

/* Redirect the patched call at ftrace_call to @func. */
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

notrace int ftrace_mcount_set(unsigned long *data)
{
	unsigned long ip = (long)(&mcount_call);
	unsigned long *addr = data;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	/*
	 * Replace the mcount stub with a pointer to the
	 * ip recorder function.
	 */
	memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, *addr);
	*addr = ftrace_modify_code(ip, old, new);

	return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
	const unsigned char *const *noptable = find_nop_table();

	/* This is running in kstop_machine */

	ftrace_mcount_set(data);

	ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE];

	return 0;
}
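
/*
 * Worked example (illustration only, not part of the build): with
 * MCOUNT_INSN_SIZE == 5 on x86, a call site at ip = 0xc0100000 that
 * should end up calling addr = 0xc0100010 is rewritten by
 * ftrace_call_replace() to the 5-byte sequence
 *
 *	e8 0b 00 00 00
 *
 * i.e. the relative-call opcode 0xe8 followed by the little-endian
 * displacement addr - (ip + MCOUNT_INSN_SIZE) = 0xc0100010 - 0xc0100005
 * = 0x0b.  The addresses are made up for the example; only the encoding
 * rule comes from the code above.
 */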