// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/decode-insn.c
 *
 * Copyright (C) 2013 Linaro Limited.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/insn.h>
#include <asm/sections.h>

#include "decode-insn.h"
#include "simulate-insn.h"

/*
 * aarch64_insn_is_steppable - can @insn be single-stepped out-of-line?
 * @insn: the instruction word, in CPU (native) byte order.
 *
 * Returns true if executing @insn from an XOL (execute-out-of-line) slot
 * under single-step produces the same effect as executing it in place.
 * Returns false for instructions that are PC-relative, exclusive, or
 * otherwise unsafe to step; callers must then either simulate or reject
 * the instruction (see arm_probe_decode_insn()).
 */
static bool __kprobes aarch64_insn_is_steppable(u32 insn)
{
	/*
	 * Branch instructions will write a new value into the PC which is
	 * likely to be relative to the XOL address and therefore invalid.
	 * Deliberate generation of an exception during stepping is also not
	 * currently safe. Lastly, MSR instructions can do any number of nasty
	 * things we can't handle during single-stepping.
	 */
	if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
		if (aarch64_insn_is_branch(insn) ||
		    aarch64_insn_is_msr_imm(insn) ||
		    aarch64_insn_is_msr_reg(insn) ||
		    aarch64_insn_is_exception(insn) ||
		    aarch64_insn_is_eret(insn) ||
		    aarch64_insn_is_eret_auth(insn))
			return false;

		/*
		 * The MRS instruction may not return a correct value when
		 * executing in the single-stepping environment. We do make one
		 * exception, for reading the DAIF bits.
		 */
		if (aarch64_insn_is_mrs(insn))
			return aarch64_insn_extract_system_reg(insn)
			     != AARCH64_INSN_SPCLREG_DAIF;

		/*
		 * The HINT instruction is steppable only if it is in whitelist
		 * and the rest of other such instructions are blocked for
		 * single stepping as they may cause exception or other
		 * unintended behaviour.
		 */
		if (aarch64_insn_is_hint(insn))
			return aarch64_insn_is_steppable_hint(insn);

		/* Any other branch/system-class instruction is safe to step. */
		return true;
	}

	/*
	 * Instructions which load PC relative literals are not going to work
	 * when executed from an XOL slot. Instructions doing an exclusive
	 * load/store are not going to complete successfully when single-step
	 * exception handling happens in the middle of the sequence.
	 */
	if (aarch64_insn_uses_literal(insn) ||
	    aarch64_insn_is_exclusive(insn))
		return false;

	return true;
}

/* Return:
 *	INSN_REJECTED     If instruction is one not allowed to kprobe,
 *	INSN_GOOD         If instruction is supported and uses instruction slot,
 *	INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
 *
 * For INSN_GOOD_NO_SLOT, api->handler is set to the simulation routine that
 * emulates the instruction's effect in software instead of stepping it.
 */
enum probe_insn __kprobes
arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
{
	/*
	 * Instructions reading or modifying the PC won't work from the XOL
	 * slot.
	 */
	if (aarch64_insn_is_steppable(insn))
		return INSN_GOOD;

	/* Not steppable: try to match a PC-relative form we can simulate. */
	if (aarch64_insn_is_bcond(insn)) {
		api->handler = simulate_b_cond;
	} else if (aarch64_insn_is_cbz(insn) ||
	    aarch64_insn_is_cbnz(insn)) {
		api->handler = simulate_cbz_cbnz;
	} else if (aarch64_insn_is_tbz(insn) ||
	    aarch64_insn_is_tbnz(insn)) {
		api->handler = simulate_tbz_tbnz;
	} else if (aarch64_insn_is_adr_adrp(insn)) {
		api->handler = simulate_adr_adrp;
	} else if (aarch64_insn_is_b(insn) ||
	    aarch64_insn_is_bl(insn)) {
		api->handler = simulate_b_bl;
	} else if (aarch64_insn_is_br(insn) ||
	    aarch64_insn_is_blr(insn) ||
	    aarch64_insn_is_ret(insn)) {
		api->handler = simulate_br_blr_ret;
	} else if (aarch64_insn_is_ldr_lit(insn)) {
		api->handler = simulate_ldr_literal;
	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
		api->handler = simulate_ldrsw_literal;
	} else {
		/*
		 * Instruction cannot be stepped out-of-line and we don't
		 * (yet) simulate it.
		 */
		return INSN_REJECTED;
	}

	return INSN_GOOD_NO_SLOT;
}

#ifdef CONFIG_KPROBES
/*
 * is_probed_address_atomic - is the probe inside an exclusive (LDX/STX) loop?
 * @scan_start: instruction just before the probed address (scan begins here).
 * @scan_end:   lowest address to examine, inclusive (scan runs backwards).
 *
 * Walks backwards from @scan_start towards @scan_end. Returns true if a
 * load-exclusive is found before any store-exclusive, i.e. the probe would
 * land between an LDXR and its paired STXR, where a single-step exception
 * would make the exclusive sequence fail forever. Instructions are stored
 * little-endian in memory, hence the le32_to_cpu() on each word.
 */
static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
	while (scan_start >= scan_end) {
		/*
		 * atomic region starts from exclusive load and ends with
		 * exclusive store.
		 */
		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
			return false;
		else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
			return true;
		scan_start--;
	}

	/* Reached the scan limit without seeing an exclusive load: not atomic. */
	return false;
}

/*
 * arm_kprobe_decode_insn - decode the instruction at @addr for kprobing.
 * @addr: kernel address of the instruction to probe.
 * @asi:  arch-specific slot/handler state to fill in.
 *
 * Wraps arm_probe_decode_insn() with an additional check that rejects
 * probes placed inside an exclusive load/store sequence. Returns one of
 * INSN_REJECTED, INSN_GOOD or INSN_GOOD_NO_SLOT (see arm_probe_decode_insn()).
 */
enum probe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
	enum probe_insn decoded;
	probe_opcode_t insn = le32_to_cpu(*addr);
	probe_opcode_t *scan_end = NULL;
	unsigned long size = 0, offset = 0;

	/*
	 * If there's a symbol defined in front of and near enough to
	 * the probe address assume it is the entry point to this
	 * code and use it to further limit how far back we search
	 * when determining if we're in an atomic sequence. If we could
	 * not find any symbol skip the atomic test altogether as we
	 * could otherwise end up searching irrelevant text/literals.
	 * KPROBES depends on KALLSYMS so this last case should never
	 * happen.
	 */
	if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) {
		/*
		 * Scan back at most to the enclosing symbol's start, capped at
		 * MAX_ATOMIC_CONTEXT_SIZE instructions (declared in
		 * decode-insn.h; the cap bounds worst-case scan cost).
		 */
		if (offset < (MAX_ATOMIC_CONTEXT_SIZE*sizeof(kprobe_opcode_t)))
			scan_end = addr - (offset / sizeof(kprobe_opcode_t));
		else
			scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
	}
	decoded = arm_probe_decode_insn(insn, &asi->api);

	/* Even a decodable insn is rejected if it sits inside an LDX/STX loop. */
	if (decoded != INSN_REJECTED && scan_end)
		if (is_probed_address_atomic(addr - 1, scan_end))
			return INSN_REJECTED;

	return decoded;
}
#endif