/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/patch.h>
#include <asm/tlbflush.h>

/*
 * The "NOP" is not an architectural no-op: -pg call-sites push lr before
 * branching to the mcount stub, so a disabled site must pop lr again to
 * keep the stack balanced.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define NOP		0xf85deb04	/* pop.w {lr} */
#else
#define NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}

int ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
		old = __opcode_to_mem_thumb32(old);
	else
		old = __opcode_to_mem_arm(old);

	if (validate) {
		if (copy_from_kernel_nofault(&replaced, (void *)pc,
					     MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	__patch_text((void *)pc, new);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif
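
/*
 * For reference: a minimal sketch of the ARM-mode branch-and-link encoding
 * that ftrace_call_replace() obtains from arm_gen_branch_link().  This is
 * an illustration only and is compiled out; example_gen_bl_arm() is a
 * hypothetical name, the real encoder lives in arch/arm/kernel/insn.c and
 * also handles the Thumb-2 encoding.
 */
#if 0
static unsigned long example_gen_bl_arm(unsigned long pc, unsigned long addr)
{
	/* ARM-mode branch offsets are taken relative to pc + 8. */
	long offset = (long)addr - (long)(pc + 8);

	/* BL reaches +/-32 MiB; out-of-range targets cannot be encoded. */
	if (offset < -33554432 || offset > 33554428)
		return 0;

	/* 0xeb000000 is BL (condition AL) with a signed 24-bit word offset. */
	return 0xeb000000 | ((offset >> 2) & 0x00ffffff);
}
#endif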

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Divert the traced function's return path: record the real return address
 * via function_graph_enter() and make the function return to
 * return_to_handler() instead.  If the core rejects the entry, restore the
 * original return address.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
					     ftrace_graph_regs_caller,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
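
/*
 * Usage note, for illustration only: none of the hooks in this file are
 * called directly.  The core ftrace code drives them when a tracer
 * registers an ftrace_ops, roughly as sketched below.  example_trace_func
 * and example_ops are hypothetical names, and the callback signature shown
 * is the older pt_regs-based one that appears to match this file's vintage;
 * newer kernels pass a struct ftrace_regs pointer instead.  Registration
 * ends up in arch_ftrace_update_code(), which patches every mcount
 * call-site via ftrace_make_call() under stop_machine().
 */
#if 0
static void example_trace_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs from the patched call-site of every traced function. */
}

static struct ftrace_ops example_ops = {
	.func = example_trace_func,
};

static int __init example_init(void)
{
	/* Turns the NOPed call-sites back into branches to the trampoline. */
	return register_ftrace_function(&example_ops);
}
#endif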