// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of -pg -mrecord-mcount -mnop-mcount -mfentry
 * flags (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue contains
 * only a single six byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the displacement of this instruction is first pointed at
 * a per-function hotpatch trampoline (see ftrace_init_nop), and the mask
 * field is then patched so that the prologue becomes an unconditional branch:
 * >	brcl	15,<hotpatch trampoline>	# offset 0
 *
 * Only the mask field is rewritten by ftrace_make_call / ftrace_make_nop.
 * The trampoline enters ftrace_caller with a non-standard C function call
 * ABI where r0 contains the return address. It is also expected that the
 * called function only clobbers r0 and r1, but restores r2-r15.
 * For module code a copy of the shared trampoline code (ftrace_plt) is
 * placed into module memory, since the shared code within the kernel image
 * may be out of range for the module's relative branch.
 */

void *ftrace_func __read_mostly = ftrace_stub;
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;
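
/*
 * Editor's note, not part of the original file: struct ftrace_insn above
 * mirrors the six byte "brcl"/"brasl" style instruction used for the
 * prologue, i.e. a two byte opcode/mask field followed by a signed 32 bit
 * displacement counted in halfwords (hence the "/ 2" in ftrace_init_nop()
 * below). A minimal compile-time sanity check, assuming static_assert()
 * from <linux/build_bug.h> is visible here (it is normally pulled in via
 * <linux/kernel.h>):
 */
static_assert(sizeof(struct ftrace_insn) == MCOUNT_INSN_SIZE);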

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_ex:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	ex	%r0," __stringify(__LC_BR_R1) "(%r0)\n"
	"	j	.\n"
	"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	.insn	ril,0xc60000000000,%r0,0f\n" /* exrl */
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_ex;
		tend = ftrace_shared_hotpatch_trampoline_ex_end;
		if (test_facility(35)) { /* exrl */
			tstart = ftrace_shared_hotpatch_trampoline_exrl;
			tend = ftrace_shared_hotpatch_trampoline_exrl_end;
		}
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return true;
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

static void brcl_disable(void *brcl)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	brcl_disable((void *)rec->ip);
	return 0;
}

static void brcl_enable(void *brcl)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	brcl_enable((void *)rec->ip);
	return 0;
}
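
/*
 * Editor's sketch, not part of the original file: enabling and disabling a
 * call site only ever rewrites the mask byte at offset 1 of the six byte
 * instruction; the displacement installed once by ftrace_init_nop() stays
 * in place:
 *
 *	c0 04 dd dd dd dd	brcl	0,<trampoline>	(disabled, a nop)
 *	c0 f4 dd dd dd dd	brcl	15,<trampoline>	(enabled, i.e. jg)
 *
 * When enabled, the per-function trampoline executes
 * "brasl %r1,<shared trampoline>", and the shared code above reloads
 * r0/r1 from the trampoline (lmg %r0,%r1,2(%r1)) and branches so that
 * ftrace_caller is entered with r0 holding the return address into the
 * traced function, as described in the header comment. The exact field
 * layout of struct ftrace_hotpatch_trampoline lives in ftrace.h /
 * asm-offsets and is assumed here, not shown.
 */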

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
	return 0;
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	brcl_disable(ftrace_graph_caller);
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	brcl_enable(ftrace_graph_caller);
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif
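
/*
 * Editor's illustration, not part of the original file: with
 * CONFIG_KPROBES_ON_FTRACE a kprobe that sits on a function's ftrace
 * location is not implemented with a breakpoint at all. The generic kprobes
 * code hooks the address via ftrace instead, and kprobe_ftrace_handler()
 * above emulates the probe hit: it points the PSW at the probed address for
 * the pre_handler, then past the six byte prologue instruction
 * (ip + MCOUNT_INSN_SIZE) for the post_handler. A minimal, hypothetical
 * user therefore looks like any other kprobe (symbol and handler names
 * chosen only for illustration):
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= my_pre_handler,
 *	};
 *	...
 *	ret = register_kprobe(&kp);
 */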