/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* We expect nops at the hook position. */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error.
	 */
	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison.
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1],
		       replaced[0], replaced[1]);
		return -EINVAL;
	}

	return 0;
}

static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	make_call(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_text_nosync((void *)hook_pos, enable ? call : nops,
			      MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
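
/*
 * Illustrative sketch: make_call() (from asm/ftrace.h) turns the
 * caller->callee displacement into an auipc/jalr pair that is written over
 * the two NOP4 slots reserved at each patchable site.  Roughly, assuming the
 * standard RISC-V auipc/jalr immediate split (this is pseudocode, not the
 * actual macro definition):
 *
 *	offset = (long)callee - (long)caller;
 *	lo12   = offset & 0xfff;
 *	// jalr sign-extends its 12-bit immediate, so when bit 11 of the
 *	// offset is set the auipc part must be bumped by 0x1000.
 *	hi20   = offset - sign_extend12(lo12);
 *	call[0] = auipc ra, hi20 >> 12;	// PC-relative upper bits
 *	call[1] = jalr  ra, lo12(ra);	// low 12 bits, links in ra
 *
 * NOP4 is the canonical 32-bit nop, addi x0, x0, 0 (0x00000013), so a
 * disabled site is simply two of those back to back (MCOUNT_INSN_SIZE bytes).
 */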

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret = ftrace_check_current_call(rec->ip, NULL);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, false);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, old_addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	static int init_graph = 1;
	int ret;

	make_call(&ftrace_graph_call, &ftrace_stub, call);

	/*
	 * When enabling the graph tracer for the first time, ftrace_graph_call
	 * should contain a call to ftrace_stub.  Once it has been disabled,
	 * the 8 bytes at that position become NOPs.
	 */
	if (init_graph) {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						call);
		init_graph = 0;
	} else {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						NULL);
	}

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	int ret;

	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);

	/*
	 * This is to make sure that ftrace_enable_ftrace_graph_caller
	 * did the right thing.
	 */
	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
					call);

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
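
/*
 * Usage sketch (not taken from this file): with dynamic ftrace these hooks
 * are normally driven from tracefs, e.g.
 *
 *	echo function_graph > /sys/kernel/tracing/current_tracer
 *
 * which ends up calling ftrace_enable_ftrace_graph_caller() above, while
 * switching the tracer back to "nop" goes through the disable path and
 * ftrace_make_nop(), restoring the NOP4 pairs at the patched sites.
 */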