/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with NOP till they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/patch.h>

#ifdef CONFIG_THUMB2_KERNEL
#define NOP		0xf85deb04	/* pop.w {lr} */
#else
#define NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* stop_machine() callback: apply the requested code modification. */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}

int ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

/*
 * Patch the instruction at @pc with @new. When @validate is set, fail
 * unless the instruction currently present matches @old.
 */
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	__patch_text((void *)pc, new);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the function's return address: save the original value and redirect
 * it to return_to_handler so the graph tracer sees the function exit.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

/* Toggle the call site between a NOP (mov r0, r0) and a branch to @func. */
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
					     ftrace_graph_regs_caller,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */