/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function calling convention, 64-bit:
 ----------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
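/*
 * Illustrative sketch of the 64-bit convention above (not part of this
 * header; the function and its name are hypothetical). A C call of
 * 'long add3(long a, long b, long c)' receives a/b/c in rdi/rsi/rdx and
 * returns the result in rax:
 *
 *	SYM_FUNC_START(add3)
 *		leaq	(%rdi,%rsi), %rax	// rax = a + b
 *		addq	%rdx, %rax		// rax += c
 *		RET
 *	SYM_FUNC_END(add3)
 */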

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	%rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm

.macro CLEAR_REGS
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */
.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	PUSH_REGS rdx=\rdx, rax=\rax, save_ret=\save_ret
	CLEAR_REGS
.endm

.macro POP_REGS pop_rdi=1
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	.if \pop_rdi
	popq	%rdi
	.endif
.endm

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK	(PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask	\
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
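/*
 * Illustrative usage sketch of the CR3 switching macros (modeled on
 * arch/x86/entry/entry_64.S; the surrounding context is elided and the
 * exact code differs between kernel versions). On syscall entry the user
 * %rsp, already stashed elsewhere, doubles as the scratch register while
 * CR3 is pointed at the kernel page tables; the exit path flips back with
 * an ordinary GPR:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
 *	...
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 */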

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif
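/*
 * Illustrative pairing sketch for the paranoid paths (modeled on
 * paranoid_entry/paranoid_exit in arch/x86/entry/entry_64.S; code elided
 * and the scratch register choices are illustrative). The entry CR3 value
 * is stashed in a callee-saved register so RESTORE_CR3 can put back
 * whichever page tables were active when the exception hit:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...handle the exception/NMI...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */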

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call	stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm

#else /* CONFIG_X86_64 */
# undef UNWIND_HINT_IRET_REGS
# define UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call	stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously can not use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */
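/*
 * Illustrative usage sketch of the GSBASE helpers above (modeled on the
 * FSGSBASE path of paranoid_entry/paranoid_exit in
 * arch/x86/entry/entry_64.S; context elided). GET_PERCPU_BASE supplies
 * this CPU's per-CPU offset, and the previous GSBASE is kept in a
 * callee-saved register so the exit path can write it back:
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	...
 *	wrgsbase %rbx		// restore the saved GSBASE
 */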