/*
 * arch/arm64/kernel/probes/decode-insn.c
 *
 * Copyright (C) 2013 Linaro Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/kprobes.h>
#include <asm/insn.h>
#include <asm/sections.h>

#include "decode-insn.h"
#include "simulate-insn.h"

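/*
 * Return true if @insn can safely be single-stepped out of line from an
 * XOL slot, i.e. it neither depends on nor modifies the PC and has no
 * side effects that the single-step handler cannot cope with.
 */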
static bool __kprobes aarch64_insn_is_steppable(u32 insn)
{
	/*
	 * Branch instructions will write a new value into the PC which is
	 * likely to be relative to the XOL address and therefore invalid.
	 * Deliberate generation of an exception during stepping is also not
	 * currently safe. Lastly, MSR instructions can do any number of nasty
	 * things we can't handle during single-stepping.
	 */
	if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
		if (aarch64_insn_is_branch(insn) ||
		    aarch64_insn_is_msr_imm(insn) ||
		    aarch64_insn_is_msr_reg(insn) ||
		    aarch64_insn_is_exception(insn) ||
		    aarch64_insn_is_eret(insn))
			return false;

		/*
		 * The MRS instruction may not return a correct value when
		 * executing in the single-stepping environment. We do make one
		 * exception, for reading the DAIF bits.
		 */
		if (aarch64_insn_is_mrs(insn))
			return aarch64_insn_extract_system_reg(insn)
			     != AARCH64_INSN_SPCLREG_DAIF;

		/*
		 * The HINT instruction is problematic when single-stepping,
		 * except for the NOP case.
		 */
		if (aarch64_insn_is_hint(insn))
			return aarch64_insn_is_nop(insn);

		return true;
	}

	/*
	 * Instructions which load PC relative literals are not going to work
	 * when executed from an XOL slot. Instructions doing an exclusive
	 * load/store are not going to complete successfully when single-step
	 * exception handling happens in the middle of the sequence.
	 */
	if (aarch64_insn_uses_literal(insn) ||
	    aarch64_insn_is_exclusive(insn))
		return false;

	return true;
}

/* Return:
 *   INSN_REJECTED     If the instruction is not allowed to be probed,
 *   INSN_GOOD         If the instruction is supported and uses its XOL slot,
 *   INSN_GOOD_NO_SLOT If the instruction is supported but doesn't use its slot.
 */
static enum kprobe_insn __kprobes
arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/*
	 * Instructions reading or modifying the PC won't work from the XOL
	 * slot.
	 */
	if (aarch64_insn_is_steppable(insn))
		return INSN_GOOD;

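	/*
	 * The remaining instructions cannot be stepped from the XOL slot,
	 * but they can be simulated; pick the matching simulation handler.
	 */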
	if (aarch64_insn_is_bcond(insn)) {
		asi->handler = simulate_b_cond;
	} else if (aarch64_insn_is_cbz(insn) ||
	    aarch64_insn_is_cbnz(insn)) {
		asi->handler = simulate_cbz_cbnz;
	} else if (aarch64_insn_is_tbz(insn) ||
	    aarch64_insn_is_tbnz(insn)) {
		asi->handler = simulate_tbz_tbnz;
	} else if (aarch64_insn_is_adr_adrp(insn)) {
		asi->handler = simulate_adr_adrp;
	} else if (aarch64_insn_is_b(insn) ||
	    aarch64_insn_is_bl(insn)) {
		asi->handler = simulate_b_bl;
	} else if (aarch64_insn_is_br(insn) ||
	    aarch64_insn_is_blr(insn) ||
	    aarch64_insn_is_ret(insn)) {
		asi->handler = simulate_br_blr_ret;
	} else if (aarch64_insn_is_ldr_lit(insn)) {
		asi->handler = simulate_ldr_literal;
	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
		asi->handler = simulate_ldrsw_literal;
	} else {
		/*
		 * Instruction cannot be stepped out-of-line and we don't
		 * (yet) simulate it.
		 */
		return INSN_REJECTED;
	}

	return INSN_GOOD_NO_SLOT;
}

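/*
 * Scan backwards from @scan_start down to @scan_end. Return true if a
 * load-exclusive is found before any store-exclusive, i.e. the probed
 * address lies inside an exclusive load/store (atomic) sequence which
 * single-stepping would break.
 */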
static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
	while (scan_start >= scan_end) {
		/*
		 * An atomic region starts with an exclusive load and ends
		 * with an exclusive store.
		 */
		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
			return false;
		else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
			return true;
		scan_start--;
	}

	return false;
}

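/*
 * Decode the instruction at @addr for probing. In addition to checking
 * the instruction itself (see arm_probe_decode_insn()), reject probes
 * that would land inside an exclusive load/store sequence.
 */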
enum kprobe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
	enum kprobe_insn decoded;
	kprobe_opcode_t insn = le32_to_cpu(*addr);
	kprobe_opcode_t *scan_end = NULL;
	unsigned long size = 0, offset = 0;

	/*
	 * If there's a symbol defined in front of and near enough to
	 * the probe address, assume it is the entry point to this
	 * code and use it to further limit how far back we search
	 * when determining if we're in an atomic sequence. If we could
	 * not find any symbol, skip the atomic test altogether as we
	 * could otherwise end up searching irrelevant text/literals.
	 * KPROBES depends on KALLSYMS so this last case should never
	 * happen.
	 */
	if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) {
		if (offset < (MAX_ATOMIC_CONTEXT_SIZE*sizeof(kprobe_opcode_t)))
			scan_end = addr - (offset / sizeof(kprobe_opcode_t));
		else
			scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
	}
	decoded = arm_probe_decode_insn(insn, asi);

	if (decoded != INSN_REJECTED && scan_end)
		if (is_probed_address_atomic(addr - 1, scan_end))
			return INSN_REJECTED;

	return decoded;
}