// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}
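/*
 * Usage sketch (not part of this file): architecture syscall entry code
 * is expected to call the helpers in this file roughly as follows.
 * arch_syscall_entry() and do_syscall() are hypothetical arch-specific
 * names, used here only for illustration:
 *
 *	noinstr void arch_syscall_entry(struct pt_regs *regs, long nr)
 *	{
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *
 *		instrumentation_begin();
 *		if (nr != -1)
 *			do_syscall(regs, nr);
 *		instrumentation_end();
 *
 *		syscall_exit_to_user_mode(regs);
 *	}
 */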
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch. This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}
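/*
 * The _prepare()/_work() split above exists so that an architecture can
 * run code of its own between establishing kernel context and performing
 * the syscall entry work. A sketch (the argument-fetch step is a
 * hypothetical placeholder):
 *
 *	syscall_enter_from_user_mode_prepare(regs);
 *	... fetch syscall arguments, arch specific ...
 *	nr = syscall_enter_from_user_mode_work(regs, nr);
 */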
/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}
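/*
 * A note on the protocol of the work loop below: it is entered with
 * interrupts disabled. Each iteration enables interrupts, handles one
 * snapshot of the work bits, then disables interrupts again before
 * re-reading the flags. Work raised while a handler ran (e.g. a signal
 * queued during schedule()) is therefore never lost: the loop only
 * terminates with interrupts disabled and no work pending.
 */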
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME)
			tracehook_notify_resume(regs);

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = read_thread_flags();
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = read_thread_flags();

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}
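/*
 * The decision above, tabulated (EMU = SYSCALL_WORK_SYSCALL_EMU,
 * TRAP = SYSCALL_WORK_SYSCALL_EXIT_TRAP):
 *
 *	EMU	TRAP	report?
 *	set	any	no (already reported at syscall entry)
 *	clear	set	yes
 *	clear	clear	no
 */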
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}
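/*
 * Note the fixed ordering above: audit runs first, then the raw
 * sys_exit tracepoint, and the ptrace exit report comes last. A ptracer
 * that rewrites the return value at the exit stop therefore does not
 * affect what audit and the tracepoint already recorded.
 */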
/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}
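/*
 * Usage sketch for the irqentry API below, as seen from architecture
 * interrupt entry code (do_irq() is a hypothetical arch handler):
 *
 *	noinstr void arch_handle_irq(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		do_irq(regs);
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 */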
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task, invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return, which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongly
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif
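/*
 * With CONFIG_PREEMPT_DYNAMIC, the irqentry_exit_cond_resched() used in
 * irqentry_exit() below resolves (via its declaration in
 * <linux/entry-common.h>, if memory serves) to either the static call or
 * the static-branch-guarded wrapper defined above, so preemption on
 * interrupt exit can be toggled at boot (preempt=) without a rebuild.
 */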
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();

		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}

irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}
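/*
 * irqentry_nmi_exit() below undoes irqentry_nmi_enter() in strict
 * reverse order. A sketch of the expected arch usage (do_nmi() is a
 * hypothetical handler):
 *
 *	irqentry_state_t state = irqentry_nmi_enter(regs);
 *
 *	instrumentation_begin();
 *	do_nmi(regs);
 *	instrumentation_end();
 *
 *	irqentry_nmi_exit(regs, state);
 */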
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}