// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/livepatch.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/**
 * enter_from_user_mode - Establish state when coming from user mode
 * @regs: Pointer to the saved user-mode register frame
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	/* Architecture specific sanity check of the user register frame */
	arch_check_user_regs(regs);
	/* Must come before any instrumentable code runs */
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	/* Leave the user context; required to run with interrupts disabled */
	user_exit_irqoff();

	/* RCU is watching now, so instrumentable tracing is safe */
	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

/*
 * Emit the syscall entry audit record for @syscall if auditing is
 * enabled for this task. The first four syscall arguments are
 * captured from the register frame.
 */
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

/*
 * Handle all syscall entry work: ptrace, seccomp, tracepoints and audit.
 *
 * Returns the (possibly rewritten) syscall number to dispatch, or -1L
 * when the syscall must be skipped (ptrace abort, SYSCALL_EMU, or a
 * seccomp denial).
 */
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long ti_work)
{
	long ret = 0;

	/* Handle ptrace */
	if (ti_work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (ti_work & _TIF_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (ti_work & _TIF_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(ti_work & _TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	/*
	 * GNU "elvis" operator: a non-zero ret (abort) is returned as-is,
	 * otherwise the possibly updated syscall number is dispatched.
	 */
	return ret ? : syscall;
}

/* Common work for both syscall entry variants below */
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long ti_work;

	ti_work = READ_ONCE(current_thread_info()->flags);
	if (ti_work & SYSCALL_ENTER_WORK)
		syscall = syscall_trace_enter(regs, syscall, ti_work);

	return syscall;
}

/*
 * Entry work only - for architectures which established the entry state
 * separately via syscall_enter_from_user_mode_prepare().
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

/*
 * Combined variant: establish entry state, enable interrupts and run the
 * syscall entry work. Returns the syscall number to dispatch or -1L.
 */
noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

/*
 * Establish entry state and enable interrupts without running the entry
 * work; the architecture calls syscall_enter_from_user_mode_work() later.
 */
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}
/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	/* RCU stops watching here; no instrumentable code beyond this point */
	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal(struct pt_regs *regs) { }

/*
 * Process TIF work bits until none of EXIT_TO_USER_MODE_WORK remains set.
 * Called with interrupts disabled; runs the individual handlers with
 * interrupts enabled and returns with them disabled again.
 */
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & _TIF_SIGPENDING)
			arch_do_signal(regs);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption was
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();
		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

/*
 * Common exit preparation for syscall and interrupt return paths.
 * Must be called with interrupts disabled; they stay disabled.
 */
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

#ifndef _TIF_SINGLESTEP
/* Architecture has no single-step TIF bit; never report */
static inline bool report_single_step(unsigned long ti_work)
{
	return false;
}
#else
/*
 * If TIF_SYSCALL_EMU is set, then the only reason to report is when
 * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has been already reported in syscall_enter_from_user_mode().
 */
#define SYSEMU_STEP	(_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)

static inline bool report_single_step(unsigned long ti_work)
{
	return (ti_work & SYSEMU_STEP) == _TIF_SINGLESTEP;
}
#endif

/* One-time syscall exit work: audit, tracepoint and ptrace reporting */
static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
{
	bool step;

	audit_syscall_exit(regs);

	if (ti_work & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(ti_work);
	if (step || ti_work & _TIF_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}
/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	u32 cached_flags = READ_ONCE(current_thread_info()->flags);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	/* Catch syscalls which return with interrupts disabled */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
		syscall_exit_work(regs, cached_flags);
}

/*
 * Architecture entry point for returning to user space after a syscall:
 * syscall exit work, then the common exit work loop, then the final
 * non-instrumentable state fixup.
 */
__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

/* Establish entry state for an interrupt which hit user mode */
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
}

/* Interrupt return to user mode: run exit work, then fix up state */
noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

/*
 * Common interrupt entry. Returns state which must be handed back to
 * irqentry_exit() so the RCU/lockdep fixups match the entry path taken.
 */
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt to invoke rcu_irq_enter(). If that nested interrupt is
	 * the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		/* Tell irqentry_exit() it must call rcu_irq_exit() */
		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

/* Preempt the interrupted kernel context if safe and required */
void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}

/*
 * Common interrupt exit. @state must be the value which the matching
 * irqentry_enter() returned, so the RCU/lockdep state is unwound in the
 * same way it was established.
 */
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}