// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

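/*
 * Note on the ordering of the entry work handled below: syscall user
 * dispatch runs first, since it can divert the syscall away from normal
 * kernel handling entirely; ptrace runs next; seccomp runs after ptrace
 * so that it observes any changes a tracer made; only then is the
 * syscall number re-read for the tracepoint and audit hooks.
 */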
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch. This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Any of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

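/*
 * Illustrative sketch (not part of this file) of how an architecture's
 * syscall path is expected to use the helpers above, loosely modeled on
 * x86's do_syscall_64(). arch_syscall_handler() and arch_invoke_syscall()
 * are made-up names standing in for the architecture specific pieces;
 * the handler is entered from the low level ASM stub with interrupts
 * disabled:
 *
 *	__visible void arch_syscall_handler(struct pt_regs *regs, long nr)
 *	{
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *
 *		instrumentation_begin();
 *		arch_invoke_syscall(regs, nr);
 *		instrumentation_end();
 *
 *		syscall_exit_to_user_mode(regs);
 *	}
 */
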
/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

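/*
 * The work loop below is entered and left with interrupts disabled.
 * Each iteration enables interrupts while the individual work items run
 * and disables them again before the work flags are re-evaluated, so
 * work raised in the meantime is not missed on the way out to user space.
 */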
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}

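/*
 * Ordering of the exit work handled below: a syscall which was diverted
 * by syscall user dispatch is filtered out first, then audit runs, then
 * the sys_exit tracepoint, and finally the ptrace report with the single
 * step decision from report_single_step() above.
 */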
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

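/*
 * Illustrative sketch (not part of this file) of how an architecture
 * brackets a non-syscall exception or interrupt handler with the
 * irqentry_enter()/irqentry_exit() pair below. handle_the_event() is a
 * made-up name standing in for the actual handler:
 *
 *	irqentry_state_t state = irqentry_enter(regs);
 *
 *	instrumentation_begin();
 *	handle_the_event(regs);
 *	instrumentation_end();
 *
 *	irqentry_exit(regs, state);
 */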
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task, invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return, which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
#endif

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION)) {
#ifdef CONFIG_PREEMPT_DYNAMIC
			static_call(irqentry_exit_cond_resched)();
#else
			irqentry_exit_cond_resched();
#endif
		}
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}

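/*
 * The NMI variants below follow the same pairing pattern. A sketch
 * (handle_the_nmi() is a made-up name) of an architecture NMI handler:
 *
 *	irqentry_state_t state = irqentry_nmi_enter(regs);
 *
 *	instrumentation_begin();
 *	handle_the_nmi(regs);
 *	instrumentation_end();
 *
 *	irqentry_nmi_exit(regs, state);
 */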
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}