// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount -mfentry
 * flags (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue contains
 * only a single six byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched and afterwards looks like this:
 * >	brcl	15,<trampoline>		# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function (reached via a per-function hotpatch trampoline) gets
 * called with a non-standard C function call ABI where r0 contains the
 * return address. It is also expected that the called function only clobbers
 * r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */

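/*
 * Illustration only (the byte values follow from the brcl 0,0 check in
 * ftrace_init_nop() and from brcl_enable()/brcl_disable() below): the
 * enabled and the disabled state of a traced function differ in exactly
 * one byte, the condition mask of the brcl instruction:
 *
 *	c0 04 dd dd dd dd	brcl	0,<trampoline>	# disabled, acts as nop
 *	c0 f4 dd dd dd dd	brcl	15,<trampoline>	# enabled, branches
 *
 * The signed halfword displacement dd dd dd dd to the per-function hotpatch
 * trampoline is filled in once by ftrace_init_nop() and is not touched by
 * the enable/disable operations.
 */
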
void *ftrace_func __read_mostly = ftrace_stub;
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_ex:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	ex	%r0," __stringify(__LC_BR_R1) "(%r0)\n"
	"	j	.\n"
	"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	.insn	ril,0xc60000000000,%r0,0f\n" /* exrl */
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_ex;
		tend = ftrace_shared_hotpatch_trampoline_ex_end;
		if (test_facility(35)) { /* exrl */
			tstart = ftrace_shared_hotpatch_trampoline_exrl;
			tend = ftrace_shared_hotpatch_trampoline_exrl_end;
		}
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return true;
}

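/*
 * Sketch (inferred from the code in this file; the authoritative layout is
 * struct ftrace_hotpatch_trampoline in asm/ftrace.h) of what ftrace_init_nop()
 * sets up for every traced function:
 *
 *   <traced function>:
 *	brcl	0,<trampoline>			# mask patched to 15 when enabled
 *	...
 *
 *   <trampoline>:				# written by ftrace_init_nop()
 *	brasl	%r1,<shared trampoline>		# brasl_opc / brasl_disp
 *	.quad	<rec->ip + 6>			# rest_of_intercepted_function
 *	.quad	FTRACE_ADDR			# interceptor
 *
 * After the brasl, %r1 points right behind it, and the lmg in the shared
 * trampoline code (displacement 2, skipping the alignment filler) loads the
 * two quadwords: %r0 ends up with the address at which the traced function
 * resumes, %r1 with the interceptor, which is then branched to.
 */
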
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015;	/* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

static void brcl_disable(void *brcl)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	brcl_disable((void *)rec->ip);
	return 0;
}

static void brcl_enable(void *brcl)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	brcl_enable((void *)rec->ip);
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
	return 0;
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	brcl_disable(ftrace_graph_caller);
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	brcl_enable(ftrace_graph_caller);
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif