/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites are patched with NOPs until they are enabled.
 * All code mutation routines here take effect atomically.
 */

#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/patch.h>

/* With the -pg option, each function on IA64 gets the two bundles below added at its entry */
static unsigned char __attribute__((aligned(8)))
ftrace_orig_code[MCOUNT_INSN_SIZE] = {
	0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
	0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
	0x05, 0x00, 0xc4, 0x00,             /* mov r42=b0 */
	0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
	0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
	0x08, 0x00, 0x00, 0x50              /* br.call.sptk.many b0 = _mcount;; */
};

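/*
 * Overlay for ftrace_orig_code and for an unconverted call site.  The
 * last u64 covers the upper half of the second bundle; imm20 and sign
 * map onto the imm20b and s fields of the br.call in slot 2, which
 * encode the IP-relative displacement to _mcount and therefore differ
 * from one call site to the next.
 */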
struct ftrace_orig_insn {
	u64 dummy1, dummy2, dummy3;
	u64 dummy4:64-41+13;
	u64 imm20:20;
	u64 dummy5:3;
	u64 sign:1;
	u64 dummy6:4;
};

/* The mcount call site is converted to the nop sequence below when tracing is disabled */
static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
	0x00, 0x00, 0x04, 0x00
};

static unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop_code;
}

/*
 * The mcount call site is converted to the call sequence below when
 * tracing is enabled.
 * Note: only the last (long) instruction differs from the nop sequence.
 */
static unsigned char __attribute__((aligned(8)))
ftrace_call_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
	0xf8, 0xff, 0xff, 0xc8
};

struct ftrace_call_insn {
	u64 dummy1, dummy2;
	u64 dummy3:48;
	u64 imm39_l:16;
	u64 imm39_h:23;
	u64 dummy4:13;
	u64 imm20:20;
	u64 dummy5:3;
	u64 i:1;
	u64 dummy6:4;
};

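/*
 * Build the call sequence for one site: the brl displacement is taken
 * relative to the bundle that holds it (the second bundle of the site,
 * ip + 0x10), its low four bits are implied zero, and the remaining
 * bits are scattered into imm20b, imm39 and the sign bit i.  Note that
 * this rewrites the shared ftrace_call_code template in place.
 */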
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	struct ftrace_call_insn *code = (void *)ftrace_call_code;
	unsigned long offset = addr - (ip + 0x10);

	code->imm39_l = offset >> 24;
	code->imm39_h = offset >> 40;
	code->imm20 = offset >> 4;
	code->i = offset >> 63;
	return ftrace_call_code;
}

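/*
 * Patch MCOUNT_INSN_SIZE bytes at @ip.  When @do_check is set, the
 * current text must match @old_code before it is replaced; callers
 * that have already validated the site (ftrace_make_nop) pass 0 and
 * skip the comparison.
 */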
static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code, int do_check)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen,
	 * it could cause us to read or write to someplace that could cause
	 * harm.  Carefully read and modify the code with
	 * copy_{from,to}_kernel_nofault(), and make sure what we read is
	 * what we expected it to be before modifying it.
	 */

	if (!do_check)
		goto skip_check;

	/* read the text we want to modify */
	if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

skip_check:
	/* replace the text with the new text */
	if (copy_to_kernel_nofault(((void *)ip), new_code, MCOUNT_INSN_SIZE))
		return -EPERM;
	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

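/*
 * Check that a call site is safe to convert to nops.  A site is in one
 * of two states: still the original compiler-generated mcount call
 * (FTRACE_FL_CONVERTED clear) or already converted to the ftrace brl
 * call (FTRACE_FL_CONVERTED set).  The per-site displacement fields
 * are copied from the live text into the matching template so that
 * memcmp() only compares the parts that are invariant across call
 * sites.
 */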
static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
	unsigned long ip = rec->ip;

	if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;
	if (rec->flags & FTRACE_FL_CONVERTED) {
		struct ftrace_call_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_call_code;
		tmp_call = (void *)replaced;
		call_insn->imm39_l = tmp_call->imm39_l;
		call_insn->imm39_h = tmp_call->imm39_h;
		call_insn->imm20 = tmp_call->imm20;
		call_insn->i = tmp_call->i;
		if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	} else {
		struct ftrace_orig_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_orig_code;
		tmp_call = (void *)replaced;
		call_insn->sign = tmp_call->sign;
		call_insn->imm20 = tmp_call->imm20;
		if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	}
}

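/*
 * Turn a call site back into nops.  ftrace_make_nop_check() already
 * verified the existing text against the expected form (original or
 * converted), so ftrace_modify_code() is told to skip its own
 * old-code comparison.
 */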
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	int ret;
	char *new;

	ret = ftrace_make_nop_check(rec, addr);
	if (ret)
		return ret;
	new = ftrace_nop_replace();
	return ftrace_modify_code(rec->ip, NULL, new, 0);
}

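/*
 * Enable tracing at a call site: the nop bundles are replaced with the
 * brl call sequence, and the existing text must still be the nop form.
 */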
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned char *old, *new;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);
	return ftrace_modify_code(ip, old, new, 1);
}

/* On IA64, _mcount can't call ftrace_stub directly; it can only jump to it */
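/*
 * ftrace_func_t values and the ftrace_call symbol are IA64 function
 * descriptors (struct fnptr), so both are dereferenced to get raw
 * instruction addresses.  ia64_patch_imm64() rewrites the 64-bit long
 * immediate in the MLX bundle at ftrace_call, which holds the address
 * _mcount jumps to, with the new tracer's entry point; the +2 follows
 * the convention of pointing at the long-immediate slot, and the
 * patched bundle is then flushed from the icache.
 */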
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	unsigned long addr = ((struct fnptr *)ftrace_call)->ip;

	if (func == ftrace_stub)
		return 0;
	ip = ((struct fnptr *)func)->ip;

	ia64_patch_imm64(addr + 2, ip);

	flush_icache_range(addr, addr + 16);
	return 0;
}
197