/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/frame.h>
#include <asm/nops.h>

	.section .text.__x86.indirect_thunk


.macro POLINE reg
	ANNOTATE_INTRA_FUNCTION_CALL
	call .Ldo_rop_\@
	int3
.Ldo_rop_\@:
	mov %\reg, (%_ASM_SP)
	UNWIND_HINT_FUNC
.endm

.macro RETPOLINE reg
	POLINE \reg
	RET
.endm

.macro THUNK reg

	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
		      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)

.endm

/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 *
 * Worse, you can only have a single EXPORT_SYMBOL per line,
 * and CPP can't insert newlines, so we have to repeat everything
 * at least twice.
 */

#define __EXPORT_THUNK(sym)	_ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)

#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
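
/*
 * Illustration only, not assembled code: with the plain RETPOLINE
 * alternative selected, the thunk generated above for e.g. %rax is
 * morally equivalent to
 *
 * __x86_indirect_thunk_rax:
 *	call 1f
 *	int3			<- RSB speculation of the RET lands here
 * 1:	mov %rax, (%rsp)
 *	ret
 *
 * The CALL pushes a return address, the MOV overwrites it with the
 * real branch target, and the RET consumes it, so the (attackable)
 * indirect branch predictor is never consulted for the original JMP.
 * Any RSB-based speculation of the RET resolves to the INT3 and stops.
 */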

#ifdef CONFIG_CALL_DEPTH_TRACKING
.macro CALL_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	CALL_DEPTH_ACCOUNT
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_call_thunk_array)

#define GEN(reg) CALL_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_call_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

.macro JUMP_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_jump_thunk_array)

#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_jump_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
#endif

/*
 * This function name (__x86_return_thunk, defined below) is magical and
 * is used by -mfunction-return=thunk-extern for the compiler to
 * generate JMPs to it.
 */
#ifdef CONFIG_RETHUNK

/*
 * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
 * special addresses:
 *
 * - srso_untrain_ret_alias() is 2M aligned
 * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
 *   and 20 in its virtual address are set (while those bits in the
 *   srso_untrain_ret_alias() function are cleared).
 *
 * This guarantees that those two addresses will alias in the branch
 * target buffer of Zen3/4 generations, causing any potentially
 * poisoned entries at that BTB slot to be evicted.
 *
 * As a result, srso_safe_ret_alias() becomes a safe return.
 */
#ifdef CONFIG_CPU_SRSO
	.section .text.__x86.rethunk_untrain

SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	ASM_NOP2
	lfence
	jmp __x86_return_thunk
SYM_FUNC_END(srso_untrain_ret_alias)
__EXPORT_THUNK(srso_untrain_ret_alias)

	.section .text.__x86.rethunk_safe
#endif

/* Needs a definition for the __x86_return_thunk alternative below. */
SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
#ifdef CONFIG_CPU_SRSO
	add $8, %_ASM_SP
	UNWIND_HINT_FUNC
#endif
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_safe_ret_alias)

	.section .text.__x86.return_thunk

/*
 * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
 * 1) The RET at __ret must be on a 64 byte boundary, for alignment
 *    within the BTB.
 * 2) The instruction at zen_untrain_ret must contain, and not
 *    end with, the 0xc3 byte of the RET.
 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
 *    from re-poisoning the BTB prediction.
 */
	.align 64
	.skip 64 - (__ret - zen_untrain_ret), 0xcc
SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	/*
	 * As executed from zen_untrain_ret, this is:
	 *
	 *   TEST $0xcc, %bl
	 *   LFENCE
	 *   JMP __ret
	 *
	 * Executing the TEST instruction has a side effect of evicting any BTB
	 * prediction (potentially attacker controlled) attached to the RET, as
	 * __ret + 1 isn't an instruction boundary at the moment.
	 */
	.byte	0xf6

	/*
	 * As executed from __ret, this is a plain RET.
	 *
	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
	 *
	 * We subsequently jump backwards and architecturally execute the RET.
	 * This creates a correct BTB prediction (type=ret), but in the
	 * meantime we suffer Straight Line Speculation (because the prediction
	 * type was not a branch), which is halted by the INT3.
	 *
	 * With SMT enabled and STIBP active, a sibling thread cannot poison
	 * RET's prediction to a type of its choice, but can evict the
	 * prediction due to competitive sharing. If the prediction is
	 * evicted, __ret will suffer Straight Line Speculation which will be
	 * contained safely by the INT3.
	 */
SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
	ret
	int3
SYM_CODE_END(__ret)

	/*
	 * Ensure the TEST decoding / BTB invalidation is complete.
	 */
	lfence

	/*
	 * Jump back and execute the RET in the middle of the TEST instruction.
	 * INT3 is for SLS protection.
	 */
	jmp __ret
	int3
SYM_FUNC_END(zen_untrain_ret)
__EXPORT_THUNK(zen_untrain_ret)
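
/*
 * Byte-level view of the overlap above (illustration only, not
 * assembled):
 *
 *	zen_untrain_ret:  f6 c3 cc	test $0xcc, %bl
 *	__ret:               c3		ret
 *	__ret + 1:              cc	int3
 *
 * Decoded from zen_untrain_ret, the three bytes form a single TEST
 * instruction; decoded from __ret, the trailing c3/cc bytes are the
 * RET and the SLS-stopping INT3.
 */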

/*
 * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
 * above. On kernel entry, srso_untrain_ret() is executed which is a
 *
 *	movabs $0xccccccc308c48348,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
	.align 64
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8

SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	add $8, %_ASM_SP
	ret
	int3
	int3
	int3
	lfence
	call srso_safe_ret
	int3
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)
__EXPORT_THUNK(srso_untrain_ret)

SYM_FUNC_START(__x86_return_thunk)
	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
			"call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
	int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_RETHUNK */

#ifdef CONFIG_CALL_DEPTH_TRACKING

	.align 64
SYM_FUNC_START(__x86_return_skl)
	ANNOTATE_NOENDBR
	/*
	 * Keep the hotpath in a 16 byte I-fetch for the non-debug
	 * case.
	 */
	CALL_THUNKS_DEBUG_INC_RETS
	shlq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
	jz 1f
	ANNOTATE_UNRET_SAFE
	ret
	int3
1:
	CALL_THUNKS_DEBUG_INC_STUFFS
	.rept 16
	ANNOTATE_INTRA_FUNCTION_CALL
	call 2f
	int3
2:
	.endr
	add $(8*16), %rsp

	CREDIT_CALL_DEPTH

	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(__x86_return_skl)

#endif /* CONFIG_CALL_DEPTH_TRACKING */
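
/*
 * Rough sketch of how __x86_return_skl above operates (a
 * simplification, not authoritative): the per-CPU counter at
 * pcpu_hot + X86_call_depth is adjusted by CALL_DEPTH_ACCOUNT (see
 * <asm/nospec-branch.h>) on every tracked CALL, and the SHLQ $5 above
 * undoes one CALL's worth on every return. Once the accumulated bits
 * have all been shifted out, the counter hits zero, signalling that
 * the RSB may be close to underflow; the 16 dummy CALLs then plant 16
 * harmless RSB entries, the "add $(8*16), %rsp" drops their 16 pushed
 * return addresses (8 bytes each) in one go, and CREDIT_CALL_DEPTH
 * rearms the counter.
 */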