/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      |  extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered]  |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11              | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/

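/*
 * Illustrative example of the 64-bit convention above (the function and
 * operands are hypothetical, not part of this header): a call equivalent
 * to the C expression f(a, b, c) compiles to roughly
 *
 *	movq	a, %rdi		# 1st argument
 *	movq	b, %rsi		# 2nd argument
 *	movq	c, %rdx		# 3rd argument
 *	call	f		# 64-bit result comes back in %rax
 *
 * with %r10, %r11 (and the argument registers) potentially clobbered on
 * return, while %rbx, %rbp and %r12-%r15 keep their values.
 */
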
#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	\rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm

.macro CLEAR_REGS
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%esi,  %esi	/* nospec si  */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret
	CLEAR_REGS
.endm

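/*
 * Typical use (an illustrative sketch; the -ENOSYS placeholder mirrors
 * how a syscall entry path would not yet know its return value):
 *
 *	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
 *
 * completes the pt_regs frame below the hardware-pushed portion and
 * zeroes every register that could otherwise feed a speculation gadget.
 */
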
.macro POP_REGS pop_rdi=1
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)

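/*
 * Sketch of the resulting CR3 layout with PTI and PCID enabled (bit
 * numbers assume the usual PAGE_SHIFT == 12 and
 * X86_CR3_PTI_PCID_USER_BIT == 11 definitions behind the macros above):
 *
 *	bit 63      X86_CR3_PCID_NOFLUSH - on write: don't flush the TLB
 *	bits 62..12 page table base; bit 12 selects the kernel (0) or
 *	            user (1) 4k half of the 8k PGD allocation
 *	bit 11      user (1) vs. kernel (0) ASID of the pair
 *	bits 10..0  remaining PCID bits, shared by both ASIDs
 */
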
.macro SET_NOFLUSH_BIT reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

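/*
 * Illustrative exit-path use (the scratch register choice here is only
 * an example): once the exit path has restored all user registers it
 * still needs, a remaining free register can drive the switch back to
 * the user CR3:
 *
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 *
 * The _STACK variant spills %rax around the NOSTACK helper so callers
 * need only a single free register.
 */
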
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

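/*
 * Sketch of how the save/restore pair above nests on a paranoid entry
 * path, where the CR3 value at entry is unknown (the register choice is
 * illustrative; \save_reg must survive the handler in between):
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...	C handler runs on the kernel CR3 ...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */
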
/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
.endm

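/*
 * Worked example of the EDX:EAX convention used above and in IBRS_EXIT
 * below: rdmsr/wrmsr split the 64-bit MSR value across %edx (high half)
 * and %eax (low half). So for a hypothetical x86_spec_ctrl_current of
 * 0x1 (just SPEC_CTRL_IBRS set), the write sequence loads %rdx = 0x1,
 * copies %eax = 0x1, shifts %rdx right by 32 to leave the high half 0x0,
 * and wrmsr then writes 0x0000000000000001 to MSR_IA32_SPEC_CTRL.
 */
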
/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
.endm

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call	stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm

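/*
 * Illustrative pairing for SAVE_AND_SET_GSBASE on an FSGSBASE-capable
 * paranoid entry (register choice is only an example; \save_reg must be
 * preserved across the handler):
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	...
 *	wrgsbase %rbx		# put the original GSBASE back on exit
 */
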
#else /* CONFIG_X86_64 */
# undef UNWIND_HINT_IRET_REGS
# define UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call	stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in the GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously cannot use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */
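
/*
 * Worked example for the SMP path above (assuming the encoding in
 * <asm/segment.h>, where the low VDSO_CPUNODE_BITS of the segment limit
 * hold the CPU number and the bits above them hold the node): on CPU 5,
 * lsl on __CPUNODE_SEG yields a limit whose masked low bits are 5, and
 * GET_PERCPU_BASE then loads __per_cpu_offset[5] as the GSBASE value.
 */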