// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
        if (arch == AUDIT_ARCH_X86_64) {
                audit_syscall_entry(regs->orig_ax, regs->di,
                                    regs->si, regs->dx, regs->r10);
        } else
#endif
        {
                audit_syscall_entry(regs->orig_ax, regs->bx,
                                    regs->cx, regs->dx, regs->si);
        }
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
        u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

        struct thread_info *ti = current_thread_info();
        unsigned long ret = 0;
        u32 work;

        if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                BUG_ON(regs != task_pt_regs(current));

        work = READ_ONCE(ti->flags);

        if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
                ret = tracehook_report_syscall_entry(regs);
                if (ret || (work & _TIF_SYSCALL_EMU))
                        return -1L;
        }

#ifdef CONFIG_SECCOMP
        /*
         * Do seccomp after ptrace, to catch any tracer changes.
         */
        if (work & _TIF_SECCOMP) {
                struct seccomp_data sd;

                sd.arch = arch;
                sd.nr = regs->orig_ax;
                sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
                if (arch == AUDIT_ARCH_X86_64) {
                        sd.args[0] = regs->di;
                        sd.args[1] = regs->si;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->r10;
                        sd.args[4] = regs->r8;
                        sd.args[5] = regs->r9;
                } else
#endif
                {
                        sd.args[0] = regs->bx;
                        sd.args[1] = regs->cx;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->si;
                        sd.args[4] = regs->di;
                        sd.args[5] = regs->bp;
                }

                ret = __secure_computing(&sd);
                if (ret == -1)
                        return ret;
        }
#endif

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);

        do_audit_syscall_entry(regs, arch);

        return ret ?: regs->orig_ax;
}

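/*
 * Work flags handled by the exit-to-usermode slow path. Any of these
 * can be set asynchronously while the task is in the kernel, so
 * exit_to_usermode_loop() below re-checks them with IRQs off until
 * none remain set.
 */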
#define EXIT_TO_USERMODE_LOOP_FLAGS                             \
        (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |   \
         _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
        /*
         * In order to return to user mode, we need to have IRQs off with
         * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
         * can be set at any time on preemptible kernels if we have IRQs on,
         * so we need to loop. Disabling preemption wouldn't help: doing the
         * work to clear some of the flags can sleep.
         */
        while (true) {
                /* We have work to do. */
                local_irq_enable();

                if (cached_flags & _TIF_NEED_RESCHED)
                        schedule();

                if (cached_flags & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                if (cached_flags & _TIF_PATCH_PENDING)
                        klp_update_patch_state(current);

                /* deal with pending signal delivery */
                if (cached_flags & _TIF_SIGPENDING)
                        do_signal(regs);

                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
                        rseq_handle_notify_resume(NULL, regs);
                }

                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
                        fire_user_return_notifiers();

                /* Disable IRQs and retry */
                local_irq_disable();

                cached_flags = READ_ONCE(current_thread_info()->flags);

                if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                        break;
        }
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags;

        addr_limit_user_check();

        lockdep_assert_irqs_disabled();
        lockdep_sys_exit();

        cached_flags = READ_ONCE(ti->flags);

        if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                exit_to_usermode_loop(regs, cached_flags);

        /* Reload ti->flags; we may have rescheduled above. */
        cached_flags = READ_ONCE(ti->flags);

        fpregs_assert_state_consistent();
        if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
                switch_fpu_return();

#ifdef CONFIG_COMPAT
        /*
         * Compat syscalls set TS_COMPAT. Make sure we clear it before
         * returning to user mode. We need to clear it *after* signal
         * handling, because syscall restart has a fixup for compat
         * syscalls. The fixup is exercised by the ptrace_syscall_32
         * selftest.
         *
         * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
         * special case only applies after poking regs and before the
         * very next return to user mode.
         */
        ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

        user_enter_irqoff();

        mds_user_clear_cpu_buffers();
}

#define SYSCALL_EXIT_WORK_FLAGS                         \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |      \
         _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
        bool step;

        audit_syscall_exit(regs);

        if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs->ax);

        /*
         * If TIF_SYSCALL_EMU is set, we only get here because of
         * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
         * We already reported this syscall instruction in
         * syscall_trace_enter().
         */
        step = unlikely(
                (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
                == _TIF_SINGLESTEP);
        if (step || cached_flags & _TIF_SYSCALL_TRACE)
                tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags = READ_ONCE(ti->flags);

        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
            WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
                local_irq_enable();

        rseq_syscall(regs);

        /*
         * First do one-time work. If these work items are enabled, we
         * want to run them exactly once per syscall exit with IRQs on.
         */
        if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
                syscall_slow_exit_work(regs, cached_flags);

        local_irq_disable();
        prepare_exit_to_usermode(regs);
}

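/*
 * 64-bit syscall dispatch, reached from the entry_SYSCALL_64 asm path
 * with IRQs off. With CONFIG_X86_X32_ABI, x32 syscalls arrive with
 * __X32_SYSCALL_BIT (0x40000000) set in regs->orig_ax; the
 * "nr &= __SYSCALL_MASK" below strips it before indexing sys_call_table.
 */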
#ifdef CONFIG_X86_64
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
        struct thread_info *ti;

        enter_from_user_mode();
        local_irq_enable();
        ti = current_thread_info();
        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
                nr = syscall_trace_enter(regs);

        /*
         * NB: Native and x32 syscalls are dispatched from the same
         * table. The only functional difference is the x32 bit in
         * regs->orig_ax, which changes the behavior of some syscalls.
         */
        nr &= __SYSCALL_MASK;
        if (likely(nr < NR_syscalls)) {
                nr = array_index_nospec(nr, NR_syscalls);
                regs->ax = sys_call_table[nr](regs);
        }

        syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
        ti->status |= TS_COMPAT;
#endif

        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
                /*
                 * Subtlety here: if ptrace pokes something larger than
                 * 2^32-1 into orig_ax, this truncates it. This may or
                 * may not be necessary, but it matches the old asm
                 * behavior.
                 */
                nr = syscall_trace_enter(regs);
        }

        if (likely(nr < IA32_NR_syscalls)) {
                nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
                regs->ax = ia32_sys_call_table[nr](regs);
#else
                /*
                 * It's possible that a 32-bit syscall implementation
                 * takes a 64-bit parameter but nonetheless assumes that
                 * the high bits are zero. Make sure we zero-extend all
                 * of the args.
                 */
                regs->ax = ia32_sys_call_table[nr](
                        (unsigned int)regs->bx, (unsigned int)regs->cx,
                        (unsigned int)regs->dx, (unsigned int)regs->si,
                        (unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
        }

        syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
        enter_from_user_mode();
        local_irq_enable();
        do_syscall_32_irqs_on(regs);
}

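/*
 * Background for the EBP fetch in do_fast_syscall_32() below: the 32-bit
 * vDSO's __kernel_vsyscall (arch/x86/entry/vdso/vdso32/system_call.S)
 * saves the sixth syscall argument (EBP) on the user stack and then
 * repurposes EBP to carry the user stack pointer across SYSENTER/
 * SYSCALL32, so the kernel must reload arg6 from *(u32 *)regs->sp.
 */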
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
        /*
         * Called using the internal vDSO SYSENTER/SYSCALL32 calling
         * convention. Adjust regs so it looks like we entered using int80.
         */
        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
                vdso_image_32.sym_int80_landing_pad;

        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
         * Fix it up.
         */
        regs->ip = landing_pad;

        enter_from_user_mode();

        local_irq_enable();

        /* Fetch EBP from where the vDSO stashed it. */
        if (
#ifdef CONFIG_X86_64
                /*
                 * Micro-optimization: the pointer we're following is explicitly
                 * 32 bits, so it can't be out of range.
                 */
                __get_user(*(u32 *)&regs->bp,
                           (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
                get_user(*(u32 *)&regs->bp,
                         (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
                ) {

                /* User code screwed up. */
                local_irq_disable();
                regs->ax = -EFAULT;
                prepare_exit_to_usermode(regs);
                return 0;       /* Keep it simple: use IRET. */
        }

        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
        /*
         * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
         * SYSRETL is available on all 64-bit CPUs, so we don't need to
         * bother with SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         */
        return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
        /*
         * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         *
         * We don't allow syscalls at all from VM86 mode, but we still
         * need to check VM, because we might be returning from sys_vm86.
         */
        return static_cpu_has(X86_FEATURE_SEP) &&
                regs->cs == __USER_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif