xref: /openbmc/linux/kernel/entry/common.c (revision 47b8ff19)
// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

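/*
 * Out-of-line, noinstr variant so that architecture entry code which
 * cannot use the __always_inline helper can establish the same state;
 * see the kernel-doc for enter_from_user_mode() in entry-common.h.
 */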
void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

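/*
 * Run the SYSCALL_WORK_ENTER handlers. Returns the syscall number to
 * execute (ptrace or seccomp may have rewritten it), or -1L when the
 * syscall has to be skipped.
 */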
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch.  This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

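/*
 * Instrumentable variant for architecture code which has already
 * established the entry state via syscall_enter_from_user_mode_prepare();
 * the contract for all three entry points is documented in entry-common.h.
 */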
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

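/*
 * Note: entered with interrupts disabled. Each iteration enables them
 * for the actual work and disables them again before rechecking the
 * flags, so no newly raised work item can be lost.
 */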
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		rcu_nocb_flush_deferred_wakeup();

		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

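/*
 * Final preparation before returning to user space: run all pending
 * TIF work, then give the architecture its last callback. Interrupts
 * are disabled on entry and stay disabled on return.
 */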
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	rcu_nocb_flush_deferred_wakeup();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP).  The syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}

static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

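/*
 * syscall_exit_to_user_mode_work() exists for architecture code which
 * does the noinstr/instrumentation bracketing itself around the exit
 * work; syscall_exit_to_user_mode() is the self-contained variant.
 */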
static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

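/*
 * Entry/exit state handling for interrupts and exceptions that come
 * from or return to user mode; both are documented in entry-common.h.
 */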
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task, invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

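/*
 * Reschedule on return from interrupt when full preemption is enabled.
 * With CONFIG_PREEMPT_DYNAMIC this is reached through a static call so
 * the preemption model can be selected at boot time.
 */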
void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
#endif

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION)) {
#ifdef CONFIG_PREEMPT_DYNAMIC
			static_call(irqentry_exit_cond_resched)();
#else
			irqentry_exit_cond_resched();
#endif
		}
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}

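/*
 * NMIs can interrupt any context, including one where lockdep already
 * considers hardirqs enabled. Snapshot that state here so that
 * irqentry_nmi_exit() restores exactly what the NMI found.
 */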
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}