// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include "entry.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
 * -mfentry flags (since gcc 9 / clang 10) is used.
 * In both cases the original as well as the disabled function prologue
 * contains only a single six byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the nop gets patched and afterwards looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */

unsigned long ftrace_plt;

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace ftrace call with a nop. */
	ftrace_generate_call_insn(&orig, rec->ip);
	ftrace_generate_nop_insn(&new);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}
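
/*
 * Illustrative sketch, not part of the original code: assuming the
 * struct ftrace_insn layout from asm/ftrace.h (a 16 bit opcode followed
 * by a signed 32 bit displacement), the two six byte instructions
 * compared above encode as:
 * >	brcl	0,0		# opc 0xc004, disp 0
 * >	brasl	%r0,target	# opc 0xc005, disp (target - ip) / 2
 * The displacement counts halfwords, so a single brasl reaches targets
 * within +-4GB of the patch site; module code may be loaded out of that
 * range, hence the ftrace_plt trampoline below.
 */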

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace nop with an ftrace call. */
	ftrace_generate_nop_insn(&orig);
	ftrace_generate_call_insn(&new, rec->ip);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310;	/* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);
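
/*
 * Illustrative decoding of the trampoline words written above (offsets
 * relative to ftrace_plt):
 * >	0:	basr	%r1,0		# r1 = ftrace_plt + 2
 * >	2:	lg	%r1,10(%r1)	# load 8 byte target from offset 12
 * >	8:	br	%r1		# branch to ftrace_caller
 * >	10:	<2 bytes of padding>
 * >	12:	.quad	FTRACE_ADDR
 * This is why, as the header comment notes, calls from modules clobber
 * r1 in addition to r0.
 */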

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * of the current task.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a relative branch on condition (brc), whose
 * second byte carries the condition mask in its high nibble. To enable
 * the ftrace graph code block, we patch the mask field to zero (0x04),
 * which turns the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones (0xf4), which turns the instruction into an unconditional
 * branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	preempt_disable_notrace();
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* Point the psw at the probed address for the pre handler. */
	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		/* Emulate the probed (mcount) instruction by skipping it. */
		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif
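
/*
 * Usage note (illustrative, not taken from this file): these paths are
 * exercised via the generic tracefs interface, e.g.
 * >	echo function > /sys/kernel/tracing/current_tracer
 * which makes the ftrace core call ftrace_make_call() for every traced
 * function, while registering a kprobe on a function entry ends up in
 * kprobe_ftrace_handler() when CONFIG_KPROBES_ON_FTRACE is enabled.
 */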