xref: /openbmc/linux/arch/arm64/kernel/probes/uprobes.c (revision 8795a739)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
 */
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

#define UPROBE_INV_FAULT_CODE	UINT_MAX

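/*
 * Copy the probed instruction into its out-of-line (XOL) slot. The uprobes
 * core hands us the XOL page, the slot's user virtual address and the
 * instruction bytes; map the page, copy the bytes and sync the D/I caches so
 * the stepped-over copy is visible to instruction fetch.
 */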
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases(dst, len);

	kunmap_atomic(xol_page_kaddr);
}

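/*
 * A BRK exception on arm64 leaves the PC pointing at the BRK instruction
 * itself, so the breakpoint address is simply the current instruction
 * pointer.
 */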
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

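/*
 * Validate the instruction at @addr before arming the probe: AArch32 tasks
 * are not supported and the address must be 4-byte aligned. The decoder then
 * decides whether the instruction can be single-stepped out of line or must
 * be simulated (INSN_GOOD_NO_SLOT).
 */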
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
		unsigned long addr)
{
	probe_opcode_t insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (mm->context.flags & MMCF_AARCH32)
		return -ENOTSUPP;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}

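/*
 * Prepare to single-step the copied instruction: record that no fault has
 * occurred yet, point the PC at the XOL slot and enable hardware single-step
 * for the current task.
 */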
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if the XOL insn traps */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Point the PC at the out-of-line (XOL) slot */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}

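/*
 * The single step of the XOL instruction is complete: move the PC past the
 * original probed instruction and turn hardware single-step back off.
 */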
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);

	/* Resume at the instruction following the probed address */
	instruction_pointer_set(regs, utask->vaddr + 4);

	user_disable_single_step(current);

	return 0;
}
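
/*
 * Report whether the XOL instruction itself faulted while being
 * single-stepped, so that the uprobes core can abort instead of completing
 * the step.
 */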
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	/*
	 * Between arch_uprobe_pre_xol() and arch_uprobe_post_xol() the XOL
	 * insn may itself have trapped. Detect this via the invalid fault
	 * code set in arch_uprobe_pre_xol(): any real fault overwrites it.
	 */
	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
		return true;

	return false;
}

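/*
 * Instructions that cannot be stepped out of line (decoded as
 * INSN_GOOD_NO_SLOT) are simulated in the kernel by the handler selected by
 * the decoder; returning true tells the uprobes core to skip the
 * single-step.
 */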
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

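/*
 * Abandon an in-progress single step, e.g. because the task received a fatal
 * signal, and restore the PC to the original probed address.
 */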
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

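/*
 * Decide whether a return_instance is still live by comparing the current
 * stack pointer with the one recorded when the return address was hijacked.
 */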
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
		struct pt_regs *regs)
{
	/*
	 * If the retprobed assembly label was reached via a plain branch (B)
	 * instruction, return true even when regs->sp and ret->stack are
	 * equal. This ensures that return instances corresponding to the
	 * callee label are cleaned up and reported when handle_trampoline()
	 * runs for the calling function.
	 */
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

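/*
 * Swap the return address for the uretprobe trampoline. On arm64 the return
 * address lives in the link register (the procedure link pointer), not on
 * the stack; the original value is returned so it can be restored later.
 */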
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = procedure_link_pointer(regs);
	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

	return orig_ret_vaddr;
}

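/*
 * Nothing to do here: arm64 dispatches uprobe BRK and single-step exceptions
 * through the break/step hooks registered below rather than through the
 * notifier chain.
 */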
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

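/*
 * Entered from the debug exception path when a task hits a uprobe BRK; hands
 * control to the generic uprobes code via uprobe_pre_sstep_notifier().
 */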
static int uprobe_breakpoint_handler(struct pt_regs *regs,
		unsigned int esr)
{
	if (uprobe_pre_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

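/*
 * Entered after the XOL instruction has been single-stepped; the PC is
 * expected to sit right after the XOL slot before the generic code finishes
 * the step via uprobe_post_sstep_notifier().
 */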
static int uprobe_single_step_handler(struct pt_regs *regs,
		unsigned int esr)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
	if (uprobe_post_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
	.imm = UPROBES_BRK_IMM,
	.fn = uprobe_breakpoint_handler,
};

/* uprobe single step handler hook */
static struct step_hook uprobes_step_hook = {
	.fn = uprobe_single_step_handler,
};

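/*
 * Register the user break/step hooks at boot so the debug exception code can
 * route uprobe traps to the handlers above.
 */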
static int __init arch_init_uprobes(void)
{
	register_user_break_hook(&uprobes_break_hook);
	register_user_step_hook(&uprobes_step_hook);

	return 0;
}

device_initcall(arch_init_uprobes);