xref: /openbmc/linux/arch/powerpc/kernel/optprobes.c (revision 6d99a79c)
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)

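/*
 * For orientation, a rough sketch of the detour buffer these offsets
 * index into. The authoritative template lives in optprobes_head.S;
 * the layout below is an illustration of the ordering, not a listing:
 *
 *   optprobe_template_entry:		save register state
 *   optprobe_template_op_address:	load &op into r3	(TMPL_OP_IDX)
 *   optprobe_template_call_handler:	bl optimized_callback	(TMPL_CALL_HDLR_IDX)
 *   optprobe_template_insn:		load insn into r4	(TMPL_INSN_IDX)
 *   optprobe_template_call_emulate:	bl emulate_step		(TMPL_EMULATE_IDX)
 *					restore register state
 *   optprobe_template_ret:		b <post-emulation nip>	(TMPL_RET_IDX)
 *   optprobe_template_end:					(TMPL_END_IDX)
 */
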
DEFINE_INSN_CACHE_OPS(ppc_optinsn);
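/*
 * DEFINE_INSN_CACHE_OPS(ppc_optinsn) generates get_ppc_optinsn_slot()
 * and free_ppc_optinsn_slot() as wrappers around the generic insn slot
 * allocator, backed by kprobe_ppc_optinsn_slots below.
 */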

static bool insn_page_in_use;

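/*
 * There is a single, statically reserved detour buffer slot
 * (optinsn_slot, set aside in the kernel image so that it stays
 * within branch range of kernel text), so it can be handed out
 * at most once at a time.
 */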
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time
	 * is a 'nop' instruction, which can always be emulated,
	 * so further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

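	/*
	 * Set up a dummy pt_regs as if we were about to execute the
	 * probed instruction, so that analyse_instr() and
	 * emulate_update_regs() can tell us the post-emulation nip.
	 */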
	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs, and so can't ensure that the branch back from
	 * the detour buffer falls within branch range (i.e. +/-32MB).
	 * The branch back from the trampoline is set up in the detour
	 * buffer to the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
			analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

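/*
 * Called from the detour buffer in place of the trap path: this mirrors
 * what the kprobe trap handler does for the pre-handler, but without
 * taking a trap.
 */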
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimization */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() takes the instruction to be emulated as its
 * second parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}
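
/*
 * A worked example with an illustrative value: for
 * val = 0x7c0802a6 ("mflr r0"), the patched sequence is
 *
 *   addis r4,0,0x7c08	# r4 = 0x7c080000 (RA=0 means literal 0, not r0)
 *   ori   r4,r4,0x02a6	# r4 = 0x7c0802a6
 */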

/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}
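
/*
 * A worked example with an illustrative value: for
 * val = 0xc000000000abcdef the sequence built above yields
 *
 *   lis    r3,0xc000	# r3 = 0xffffffffc0000000 (sign-extended)
 *   ori    r3,r3,0x0000	# bits 32-47 of val
 *   rldicr r3,r3,32,31	# r3 = 0xc000000000000000
 *   oris   r3,r3,0x00ab	# r3 = 0xc000000000ab0000
 *   ori    r3,r3,0xcdef	# r3 = 0xc000000000abcdef
 *
 * The rldicr rotate-and-clear both moves the high 32 bits into place
 * and discards the sign extension introduced by lis.
 */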

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses the 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively close by, because the
	 * address is specified in an immediate field in the instruction
	 * opcode itself: 24 bits of the opcode encode the branch target.
	 * The target must therefore be within 32MB on either side of the
	 * current instruction.
	 */
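	/*
	 * Sanity check on that range: the LI field of 'b' is a 24-bit
	 * signed word offset, shifted left by 2 since instructions are
	 * 4-byte aligned, i.e. a 26-bit signed byte offset:
	 * +/- 2^25 bytes = +/- 32MB.
	 */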
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Set up the template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces a single instruction
 * (4 bytes aligned and 4 bytes long). It is impossible to encounter
 * another kprobe in this address range, so always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction which will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
					       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

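/*
 * Re-arming the kprobe writes the breakpoint (trap) instruction back
 * over the branch, returning the probe to the trap-based slow path.
 */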
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}
352