/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}
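
/*
 * Editor's illustration (not upstream text): the two branches above just
 * follow the respective syscall argument ABIs, of which audit records the
 * syscall number and first four arguments:
 *
 *	x86-64:	nr in rax, args in rdi, rsi, rdx, r10, r8, r9
 *	i386:	nr in eax, args in ebx, ecx, edx, esi, edi, ebp
 *
 * For example, a 32-bit write(fd, buf, count) arrives with eax set to
 * __NR_write, ebx = fd, ecx = buf, edx = count, so it is audited from
 * bx/cx/dx/si above.
 */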

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}
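
/*
 * Editor's illustration (not upstream text): the -1 returns above are what
 * make PTRACE_SYSEMU work.  A hedged userspace sketch, assuming a tracer
 * on x86-64 using RAX from <sys/reg.h>:
 *
 *	// child:
 *	ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *	raise(SIGSTOP);
 *	syscall(SYS_getpid);			// reported, then skipped
 *
 *	// parent, after waitpid() saw the SIGSTOP:
 *	ptrace(PTRACE_SYSEMU, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);		// syscall-entry stop
 *	ptrace(PTRACE_POKEUSER, pid, 8 * RAX, fake_ret);  // emulated result
 *
 * Because syscall_trace_enter() returns -1, the kernel never runs the real
 * handler; the tracee resumes seeing only the poked return value.
 */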

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* Deal with pending signal delivery. */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}
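
/*
 * Editor's illustration (not upstream text) of why one pass is not enough:
 *
 *	pass 1:	_TIF_NEED_RESCHED set -> schedule()
 *		while scheduled out, a signal arrives: _TIF_SIGPENDING set
 *	pass 2:	do_signal() delivers it; flags re-read with IRQs off are
 *		clean -> break
 *
 * Only a pass that ends with IRQs off and no loop flags set may fall
 * through toward user mode.
 */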

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}
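
/*
 * Editor's illustration (not upstream text) of the compat restart fixup
 * referenced above, a hedged sketch of the signal-side logic (cf.
 * syscall_get_error() and handle_signal()): the return value must be
 * sign-extended from 32 bits before the restart comparison, which is why
 * TS_COMPAT must survive until after do_signal() has run:
 *
 *	error = (current->thread.status & (TS_COMPAT | TS_I386_REGS_POKED))
 *			? (long)(int)regs->ax : (long)regs->ax;
 *	if (error == -ERESTARTSYS || error == -ERESTARTNOINTR) {
 *		regs->ax = regs->orig_ax;	// re-arm the syscall
 *		regs->ip -= 2;			// back over the trap insn
 *	}
 */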

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || (cached_flags & _TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}
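
/*
 * Editor's illustration (not upstream text): truth table for the 'step'
 * computation above:
 *
 *	_TIF_SINGLESTEP	_TIF_SYSCALL_EMU	step
 *	      0			0		  0
 *	      0			1		  0
 *	      1			0		  1	(PTRACE_SINGLESTEP)
 *	      1			1		  0	(PTRACE_SYSEMU_SINGLESTEP,
 *						 already reported at entry)
 */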

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif
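
/*
 * Editor's illustration (not upstream text) of the masking above: with
 * CONFIG_X86_X32_ABI, __SYSCALL_MASK strips __X32_SYSCALL_BIT
 * (0x40000000); without it, __SYSCALL_MASK is ~0.  An x32 task issuing
 * nr = 0x40000000 | 1 is therefore dispatched as:
 *
 *	nr & __SYSCALL_MASK = 0x40000001 & ~0x40000000 = 1
 *
 * i.e. through sys_call_table[1], while handlers that care can still
 * inspect the bit preserved in regs->orig_ax.
 */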

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	current->thread.status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}
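
/*
 * Editor's illustration (not upstream text) of the zero extension above:
 * the pt_regs fields are unsigned long, so on a 64-bit kernel a tracer
 * can leave garbage in the high half:
 *
 *	regs->bx                = 0xdeadbeef00000003	(poked via ptrace)
 *	(unsigned int)regs->bx  = 0x00000003		(what the handler sees)
 *
 * Without the casts, a handler that takes an unsigned long argument could
 * act on the 64-bit garbage value instead.
 */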

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}
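
/*
 * Editor's illustration (not upstream text): a 32-bit task entering here
 * with a raw trap, e.g. write(1, "hi\n", 3) in i386 assembly:
 *
 *	movl	$4, %eax	// __NR_write in the 32-bit ABI
 *	movl	$1, %ebx	// fd
 *	movl	$msg, %ecx	// buf
 *	movl	$3, %edx	// count
 *	int	$0x80
 */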

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using
	 * int $0x80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

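	/*
	 * Editor's illustration (not upstream text) of why EBP sits on the
	 * user stack here, a hedged sketch of the 32-bit vDSO entry stub
	 * (cf. vdso32/system_call.S): the real sixth syscall argument is
	 * saved before EBP is clobbered for the fast-path entry:
	 *
	 *	__kernel_vsyscall:
	 *		push	%ecx
	 *		push	%edx
	 *		push	%ebp		// real 6th arg, now at (%esp)
	 *		movl	%esp, %ebp	// SYSENTER variant
	 *		sysenter
	 *	int80_landing_pad:
	 *		pop	%ebp
	 *		pop	%edx
	 *		pop	%ecx
	 *		ret
	 *
	 * So (u32)regs->sp points at the saved EBP, which is fetched into
	 * regs->bp below.
	 */
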
	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			    (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif
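
/*
 * Editor's illustration (not upstream text), a hedged sketch of how the
 * asm entry code consumes the 0/1 return value above (cf.
 * entry_SYSENTER_32 and the compat entries):
 *
 *	call	do_fast_syscall_32
 *	testl	%eax, %eax
 *	jz	.Lsyscall_32_done	// 0: slow path, return with IRET
 *	// 1: regs are safe for the SYSEXIT/SYSRETL fast exit
 */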