xref: /openbmc/linux/kernel/entry/common.c (revision 11894468)
// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/livepatch.h>
#include <linux/audit.h>

#include "common.h"

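/*
 * Defining CREATE_TRACE_POINTS before including the trace header
 * instantiates the sys_enter/sys_exit tracepoints used below.
 */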
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

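/*
 * Audit records the syscall number plus the first four arguments; all six
 * are fetched from pt_regs, but audit_syscall_entry() only takes four.
 */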
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch.  This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

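/*
 * Fast-path wrapper: only drop into syscall_trace_enter() when one of the
 * SYSCALL_WORK_ENTER bits is set. Returns the (possibly rewritten) syscall
 * number, or -1 if the syscall should be skipped.
 */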
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

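/*
 * A minimal sketch (assumed, not part of this file) of how an architecture's
 * syscall entry C code might combine these helpers; arch_invoke_syscall() is
 * a placeholder for the architecture's dispatch:
 *
 *	noinstr void arch_handle_syscall(struct pt_regs *regs)
 *	{
 *		long nr = syscall_get_nr(current, regs);
 *
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *		if (nr != -1L)
 *			arch_invoke_syscall(regs, nr);
 *		syscall_exit_to_user_mode(regs);
 *	}
 */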
noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

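/*
 * Split variant of syscall_enter_from_user_mode(): establishes entry state
 * and enables interrupts, but does not run the entry work. Presumably useful
 * when the architecture must do something (e.g. fetch arguments) between
 * state establishment and syscall_enter_from_user_mode_work().
 */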
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

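/*
 * Handle both TIF_NOTIFY_SIGNAL notifications and regular pending signals;
 * signal delivery (and syscall restart) is left to arch_do_signal_or_restart().
 */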
static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();
		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

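/*
 * Architectures without _TIF_SINGLESTEP cannot report a single-step trap on
 * syscall exit, so report_single_step() degrades to a constant false there.
 */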
#ifndef _TIF_SINGLESTEP
static inline bool report_single_step(unsigned long work)
{
	return false;
}
#else
/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP).  The syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (!(work & SYSCALL_WORK_SYSCALL_EMU))
		return false;

	return !!(current_thread_info()->flags & _TIF_SINGLESTEP);
}
#endif

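/*
 * One-time syscall exit work: audit, the sys_exit tracepoint and, last,
 * the ptrace exit report (which may also carry the single-step report).
 */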
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked, for the same reason the
	 * entry side was not invoked in syscall_trace_enter(): the ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

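/*
 * Ordering on the syscall exit path: run the one-time syscall exit work with
 * interrupts enabled, disable interrupts, loop through the generic exit work,
 * then do the final, non-instrumentable return-to-user transition.
 */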
__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

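/*
 * A minimal usage sketch (assumed, not part of this file): an architecture's
 * interrupt/exception handler wrapper typically brackets the real handler
 * between irqentry_enter() and irqentry_exit():
 *
 *	irqentry_state_t state = irqentry_enter(regs);
 *
 *	instrumentation_begin();
 *	handle_the_event(regs);		// placeholder for the real handler
 *	instrumentation_end();
 *
 *	irqentry_exit(regs, state);
 */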
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task, invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return, which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

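/*
 * Preemption point for interrupt return to kernel mode: reschedule only when
 * preempt_count() is zero and need_resched() is set.
 */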
void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}

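/*
 * NMI-style entries cannot use irqentry_enter()/irqentry_exit() because they
 * can nest anywhere, even in RCU-idle or noinstr regions; the prior lockdep
 * hardirq state is captured here and conditionally restored on exit.
 */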
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}