/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include "entry.h"

void mcount_replace_code(void);
void ftrace_disable_code(void);
void ftrace_enable_insn(void);

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The complete mcount block initially gets replaced
 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
 * only patch the jg/lg instruction within the block.
 * Note: we do not patch the first instruction to an unconditional branch,
 * since that would break kprobes/jprobes. It is easier to leave the larl
 * instruction in and only modify the second instruction.
 * The enabled ftrace code block looks like this:
 *	larl	%r0,.+24		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 *	larl	%r0,.+24		# offset 0
 * >	jg	.+18			# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"mcount_replace_code:\n"
	"	larl	%r0,0f\n"
	"ftrace_disable_code:\n"
	"	jg	0f\n"
	"	br	%r1\n"
	"	brcl	0,0\n"
	"	brc	0,0\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

#define MCOUNT_BLOCK_SIZE	24
#define MCOUNT_INSN_OFFSET	6
#define FTRACE_INSN_SIZE	6

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Initial replacement of the whole mcount block */
	if (addr == MCOUNT_ADDR) {
		if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
				       mcount_replace_code,
				       MCOUNT_BLOCK_SIZE))
			return -EPERM;
		return 0;
	}
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}

/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */