/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
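
/*
 * Worked example of the 64-bit convention above (the function name and
 * signature are hypothetical, purely for illustration):
 *
 *	long copy_thing(void *dst, const void *src, unsigned long len);
 *
 * arrives with dst in %rdi, src in %rsi and len in %rdx, and returns its
 * result in %rax. The callee may clobber %rcx and %r8-%r11 (besides the
 * argument registers) freely, but must preserve %rbx, %rbp and %r12-%r15.
 * A 16-byte struct return would come back in rax:rdx; anything larger
 * shifts the arguments up: a hidden pointer to the caller-allocated
 * return slot goes in %rdi, and dst/src/len move to %rsi, %rdx, %rcx.
 */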

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	%rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->bx */
	pushq	%rbp		/* pt_regs->bp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm

.macro CLEAR_REGS
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	PUSH_REGS rdx=\rdx, rax=\rax, save_ret=\save_ret
	CLEAR_REGS
.endm

/* Undo PUSH_REGS: pop in reverse order, optionally leaving pt_regs->di on the stack. */
.macro POP_REGS pop_rdi=1
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK	(PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask	\
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
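
/*
 * Usage sketch for SWITCH_TO_KERNEL_CR3 (illustrative only - the real
 * entry paths live in entry_64.S; the choice of %rdi as scratch register
 * is just an example and assumes its value has been saved first):
 *
 *	pushq	%rdi
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *	popq	%rdi
 *
 * The scratch register is clobbered. When the CPU does not have
 * X86_FEATURE_PTI set, the leading ALTERNATIVE leaves the jmp in place
 * and the CR3 write is skipped entirely.
 */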
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif
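
/*
 * Pairing sketch for the save/restore variants above (illustrative; the
 * register choices mirror what a paranoid-entry style path might use,
 * with a callee-saved register such as %r14 holding the saved CR3):
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	... kernel work, possibly calling C code ...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 *
 * \save_reg must survive the intervening code unmodified, since
 * RESTORE_CR3 writes the saved value straight back to %cr3, adding only
 * the NOFLUSH bit when no flush is pending for the user ASID.
 */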
/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm

#else /* CONFIG_X86_64 */
# undef		UNWIND_HINT_IRET_REGS
# define	UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously can not use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */
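
/*
 * GSBASE handling sketch (illustrative; the register choices are
 * arbitrary examples, and this assumes FSGSBASE instructions are usable
 * and GSBASE may still hold a user value, as on a paranoid entry):
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	... run with the kernel's per-CPU GSBASE ...
 *	wrgsbase %rbx		(restore the previous GSBASE on exit)
 *
 * Note that GET_PERCPU_BASE deliberately goes through the CPUNODE
 * segment limit (or pcpu_unit_offsets on !SMP) instead of RDPID, for the
 * KVM TSC_AUX reason documented above.
 */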