// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
	return 0;
}

int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
	return 0;
}

static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	make_call(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_text_nosync
	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret = ftrace_check_current_call(rec->ip, NULL);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, false);
}
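
/*
 * Illustration (editor's sketch, not part of the original file): the
 * auipc/jalr pair that the make_call() macro used above emits (the real
 * macro lives alongside NOP4 and MCOUNT_INSN_SIZE in the arch ftrace
 * header), spelled out as plain C. The helper name sketch_make_call() is
 * hypothetical; only the arithmetic is meant to be instructive. auipc
 * writes pc plus the upper 20 bits of the displacement into ra, and jalr
 * adds the sign-extended lower 12 bits and jumps, so the 8-byte pair
 * reaches any target within +/-2GB of the patch site. Because
 * ftrace_check_current_call() and patch_text_nosync() always read and
 * write the full 8-byte pair, a half-updated auipc/jalr sequence is never
 * published.
 */
static inline void sketch_make_call(unsigned long caller, unsigned long callee,
				    unsigned int *call)
{
	unsigned int offset = (unsigned int)(callee - caller);
	/* jalr sign-extends its 12-bit immediate; round auipc up if bit 11 is set */
	unsigned int hi20 = (offset + 0x800) & 0xfffff000;
	unsigned int lo12 = offset & 0xfff;

	call[0] = hi20 | (0x1 << 7) | 0x17;		/* auipc ra, hi20 */
	call[1] = (lo12 << 20) | (0x1 << 15) | (0x1 << 7) | 0x67;
							/* jalr ra, lo12(ra) */
}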

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	ftrace_arch_code_modify_prepare();
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	ftrace_arch_code_modify_post_process();

	return out;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, old_addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	static int init_graph = 1;
	int ret;

	make_call(&ftrace_graph_call, &ftrace_stub, call);

	/*
	 * When enabling the graph tracer for the first time,
	 * ftrace_graph_call should contain a call to ftrace_stub.  Once it
	 * has been disabled, the 8 bytes at that position become NOPs.
	 */
	if (init_graph) {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						call);
		init_graph = 0;
	} else {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						NULL);
	}

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	int ret;

	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);

	/*
	 * This is to make sure that ftrace_enable_ftrace_graph_caller
	 * did the right thing.
	 */
	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
					call);

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
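
/*
 * Illustration (editor's sketch, not part of the original file): the inverse
 * of the encoding sketched above, useful when reasoning about what
 * ftrace_check_current_call() compares at a patch site. Given the two
 * instruction words at the site, recover the call target. The helper name
 * sketch_call_target() is hypothetical; both immediates are sign-extended,
 * matching the hardware semantics of auipc and jalr.
 */
static inline unsigned long sketch_call_target(unsigned long site,
					       const unsigned int *insn)
{
	/* auipc imm[31:12], sign-extended and added to pc */
	long hi = (long)(int)(insn[0] & 0xfffff000);
	/* jalr imm[11:0], sign-extended via arithmetic shift */
	long lo = (long)((int)insn[1] >> 20);

	return site + hi + lo;
}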