// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/livepatch.h>
#include <linux/audit.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

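/*
 * Report the syscall number and the first four syscall arguments to the
 * audit subsystem when an audit context is active for the current task.
 */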
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

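/*
 * Handle the syscall entry work flags: syscall user dispatch, ptrace,
 * seccomp, tracepoints and audit.  Returns the (possibly rewritten)
 * syscall number, or -1 when the syscall has to be skipped.
 */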
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch.  This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

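/* Run the syscall entry work if any SYSCALL_WORK_ENTER flag is set. */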
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

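/*
 * Process TIF_NOTIFY_SIGNAL callbacks and hand pending signals to the
 * architecture specific signal delivery code.
 */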
static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

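/*
 * Process the pending TIF work with interrupts enabled and recheck the
 * flags with interrupts disabled until no work is left.
 */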
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();
		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

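/*
 * Final preparation before returning to user space: run pending TIF work,
 * invoke the architecture specific preparation and perform the lockdep and
 * address limit sanity checks.  Called with interrupts disabled.
 */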
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

#ifndef _TIF_SINGLESTEP
static inline bool report_single_step(unsigned long work)
{
	return false;
}
#else
/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP).  The syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (!(work & SYSCALL_WORK_SYSCALL_EMU))
		return false;

	return !!(current_thread_info()->flags & _TIF_SINGLESTEP);
}
#endif

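/*
 * Handle the one-time syscall exit work: syscall user dispatch rollback,
 * audit, tracepoints and ptrace reporting.
 */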
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

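/*
 * Common body of syscall_exit_to_user_mode_work() and
 * syscall_exit_to_user_mode(): run the syscall exit work with interrupts
 * enabled, then disable interrupts and prepare the return to user space.
 */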
static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

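/*
 * Establish the kernel entry state for interrupts and exceptions.  Entries
 * from user mode are handled via irqentry_enter_from_user_mode(); for
 * kernel mode entries the RCU and lockdep state is set up as described in
 * the comment below.  The returned state must be passed to irqentry_exit().
 */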
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return, which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark a quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

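/*
 * Reschedule on return from an interrupt when preemption is possible, after
 * sanity checking RCU and the thread stack.
 */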
void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}

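/*
 * NMI entry: record the lockdep hardirq state and inform lockdep, RCU and
 * ftrace about the NMI context.
 */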
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

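/*
 * NMI exit: undo irqentry_nmi_enter() in reverse order and restore the
 * recorded lockdep hardirq state.
 */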
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}