/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      more) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - the kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means semantics
      similar to 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
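
/*
 * Illustrative sketch only - the function, values and label below are
 * hypothetical and not part of this file. It shows how the 64-bit
 * struct-return rule above lowers a call. Given:
 *
 *	struct triple { long a, b, c; };	24 bytes: returned in memory
 *	struct triple f(int x, int y);
 *
 * the caller passes a hidden pointer to the return slot in rdi and all
 * other arguments shift up by one register:
 *
 *	leaq	ret_buf(%rsp), %rdi	# hidden return-struct pointer
 *	movl	$1, %esi		# x: shifted up from rdi
 *	movl	$2, %edx		# y: shifted up from rsi
 *	call	f			# f fills the struct through rdi
 */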

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel
 * entry unless the syscall needs a complete, fully-filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is the syscall number. On a CPU exception, it is
 * the error code. On a hardware interrupt, it is the IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8

/*
 * Reserve room for the 15 GP registers of struct pt_regs; ORIG_RAX and the
 * iret frame above them are already on the stack at this point:
 */
.macro ALLOC_PT_GPREGS_ON_STACK
	addq	$-(15*8), %rsp
.endm

.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
	.if \r11
	movq %r11, 6*8+\offset(%rsp)
	.endif
	.if \r8910
	movq %r10, 7*8+\offset(%rsp)
	movq %r9,  8*8+\offset(%rsp)
	movq %r8,  9*8+\offset(%rsp)
	.endif
	.if \rax
	movq %rax, 10*8+\offset(%rsp)
	.endif
	.if \rcx
	movq %rcx, 11*8+\offset(%rsp)
	.endif
	movq %rdx, 12*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdi, 14*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset extra=0
.endm
.macro SAVE_C_REGS offset=0
	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
	SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_R891011
	SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RCX_R891011
	SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
.endm

.macro SAVE_EXTRA_REGS offset=0
	movq %r15, 0*8+\offset(%rsp)
	movq %r14, 1*8+\offset(%rsp)
	movq %r13, 2*8+\offset(%rsp)
	movq %r12, 3*8+\offset(%rsp)
	movq %rbp, 4*8+\offset(%rsp)
	movq %rbx, 5*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset
.endm

.macro POP_EXTRA_REGS
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
.endm

.macro POP_C_REGS
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
.endm

/* 0xf1 is the undocumented "icebp"/int1 debug-trap instruction: */
.macro icebp
	.byte 0xf1
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	.if \ptregs_offset
		leaq \ptregs_offset(%rsp), %rbp
	.else
		mov %rsp, %rbp
	.endif
	orq	$0x1, %rbp
#endif
.endm
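
/*
 * Usage sketch (abridged and hypothetical; the real call sites live in
 * entry_64.S, not in this header). A typical entry path builds a full
 * pt_regs and tears it down again like this:
 *
 *	ALLOC_PT_GPREGS_ON_STACK	# room for the 15 GP registers
 *	SAVE_C_REGS			# callee-clobbered registers
 *	SAVE_EXTRA_REGS			# callee-saved regs: pt_regs complete
 *	ENCODE_FRAME_POINTER		# only valid after SAVE_EXTRA_REGS
 *	...				# C handler runs here
 *	POP_EXTRA_REGS
 *	POP_C_REGS
 *	addq	$8, %rsp		# skip orig_ax, leaving the iret frame
 */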

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_SWITCH_PGTABLES_MASK	(1<<PAGE_SHIFT)
#define PTI_SWITCH_MASK		(PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT))

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_SWITCH_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD and ASID to the user version */
	orq	$(PTI_SWITCH_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Is the "switch mask" all zero?  That means that both of
	 * these are zero:
	 *
	 *	1. The user/kernel PCID bit, and
	 *	2. The user/kernel "bit" that points CR3 to the
	 *	   bottom half of the 8k PGD
	 *
	 * That indicates a kernel CR3 value, not a user CR3.
	 */
	testq	$(PTI_SWITCH_MASK), \scratch_reg
	jz	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$X86_CR3_PTI_SWITCH_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
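
/*
 * Usage sketch (abridged; the real call sites are in entry_64.S, and the
 * register choices here are illustrative): paranoid entry paths, which can
 * fire with either the kernel or the user CR3 live, save the entry-time
 * CR3 value and restore exactly that value on exit:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...				# handler runs on kernel page tables
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */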

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif /* CONFIG_PAGE_TABLE_ISOLATION */

#endif /* CONFIG_X86_64 */

/*
 * Emit 'call enter_from_user_mode' unless the call can be avoided: it is
 * compiled out entirely when CONFIG_CONTEXT_TRACKING=n, and skipped at
 * runtime via the static-jump infrastructure when context tracking is
 * disabled.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
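
/*
 * Usage sketch (hypothetical sequence, condensed from the style of
 * entry_64.S; the scratch register choice is illustrative): on entry from
 * user space this is invoked once interrupts have been marked off:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *	TRACE_IRQS_OFF
 *	CALL_enter_from_user_mode	# no-op without CONFIG_CONTEXT_TRACKING
 */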