/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
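	 *
	 * A ptrace tracer stopped in tracehook_report_syscall_entry()
	 * above may have rewritten regs->orig_ax or the argument
	 * registers; seccomp_data is built from the live regs below, so
	 * the filter sees those changes.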
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
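	 *
	 * TS_COMPAT also feeds in_compat_syscall()/in_ia32_syscall(), so
	 * it must not be left set past this return to user mode, or a
	 * later 64-bit entry would be misidentified as a compat syscall.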
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	nr &= __SYSCALL_MASK;
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
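		 *
		 * The truncation also means that when syscall_trace_enter()
		 * asks us to skip the syscall by returning -1, nr becomes
		 * 0xffffffff, which fails the IA32_NR_syscalls range check
		 * below, so no syscall is dispatched.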
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
		regs->ax = ia32_sys_call_table[nr](regs);
#else
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif