// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/livepatch.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	/* Tracing is instrumentable, so run it after RCU has been activated */
	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

/*
 * Hand the syscall number and the first four syscall arguments to the
 * audit subsystem, but only when an audit context is active for this task.
 */
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

/*
 * Run all syscall entry work: ptrace, seccomp, syscall tracepoints and
 * audit. Returns the syscall number to execute, or -1L when the syscall
 * has to be skipped (tracer veto, SYSEMU emulation or seccomp denial).
 */
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long ti_work)
{
	long ret = 0;

	/* Handle ptrace */
	if (ti_work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (ti_work & _TIF_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (ti_work & _TIF_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	if (unlikely(ti_work & _TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	/* GCC extension: yields ret when ret != 0, else the syscall number */
	return ret ? : syscall;
}

/*
 * Syscall entry point called from low level architecture code.
 *
 * Establishes kernel state (lockdep, context tracking, tracing), reenables
 * interrupts and runs the syscall entry work when any work bit is set.
 */
noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	unsigned long ti_work;

	enter_from_user_mode(regs);
	instrumentation_begin();

	local_irq_enable();
	ti_work = READ_ONCE(current_thread_info()->flags);
	if (ti_work & SYSCALL_ENTER_WORK)
		syscall = syscall_trace_enter(regs, syscall, ti_work);
	instrumentation_end();

	return syscall;
}

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
	/* Instrumentable tracing must run before RCU state is adjusted */
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

/*
 * Handle TIF_NOTIFY_SIGNAL and architecture signal delivery / syscall
 * restart work on the way out to user space.
 */
static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

/*
 * Process TIF work bits until all EXIT_TO_USER_MODE_WORK bits are clear.
 * The work handlers run with interrupts enabled; the flags are reevaluated
 * with interrupts disabled on every iteration.
 */
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption was
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();
		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

/*
 * Generic exit-to-user work which must run with interrupts disabled
 * before the low level architecture code returns to user space.
 */
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

#ifndef _TIF_SINGLESTEP
/* Architecture has no single-step TIF bit: nothing to report */
static inline bool report_single_step(unsigned long ti_work)
{
	return false;
}
#else
/*
 * If TIF_SYSCALL_EMU is set, then the only reason to report is when
 * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has been already reported in syscall_enter_from_usermode().
 */
#define SYSEMU_STEP	(_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)

static inline bool report_single_step(unsigned long ti_work)
{
	/* True only when SINGLESTEP is set and SYSCALL_EMU is not */
	return (ti_work & SYSEMU_STEP) == _TIF_SINGLESTEP;
}
#endif

/*
 * Run all syscall exit work: audit, the syscall exit tracepoint and the
 * ptrace exit/single-step report.
 */
static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
{
	bool step;

	audit_syscall_exit(regs);

	if (ti_work & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(ti_work);
	if (step || ti_work & _TIF_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	u32 cached_flags = READ_ONCE(current_thread_info()->flags);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	/* With PROVE_LOCKING, catch syscalls which returned with IRQs off */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
		syscall_exit_work(regs, cached_flags);
}

/*
 * Syscall exit path called from low level architecture code: runs the
 * syscall exit work with interrupts enabled, then the generic exit work
 * with interrupts disabled, and finally flips the kernel state back to
 * user mode context.
 */
__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

/*
 * Establish state on interrupt/exception entry. The returned state
 * records whether RCU needs to be told about the exit (exit_rcu) and
 * must be passed to irqentry_exit().
 */
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for __rcu_is_watching() here would prevent the nesting
	 * interrupt to invoke rcu_irq_enter(). If that nested interrupt is
	 * the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	/* Use the combo lockdep/tracing function */
	trace_hardirqs_off();
	instrumentation_end();

	return ret;
}

/*
 * Conditionally reschedule on interrupt exit, but only when no preempt
 * count is held.
 */
void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}

/*
 * Undo irqentry_enter(): handle the return to user mode, the return to
 * an interrupts-enabled kernel context (with optional preemption), or
 * the return to an interrupts-disabled context. @state must be the value
 * returned by the matching irqentry_enter().
 */
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}