// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables; all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence we get a sort of weak CPU binding, though it is
     still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

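/*
 * A minimal usage sketch (illustrative only, not part of this file; the
 * handler name is made up): a subsystem registers its handler once at boot
 * and raises the softirq from hot paths, typically hard interrupt context.
 * The handler then runs on the same CPU, either on irq exit or in ksoftirqd:
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		... runs with the softirq accounted as "in service" ...
 *	}
 *
 *	open_softirq(NET_TX_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);		(any context, saves/restores IRQs)
 *	raise_softirq_irqoff(NET_TX_SOFTIRQ);	(when IRQs are already disabled)
 *
 * New softirq vectors are not added casually; most code should use tasklets,
 * timers or workqueues, which are built on top of the vectors defined here.
 */
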
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here, as that would starve userspace;
 * but we also don't want to introduce a worst case 1/HZ latency
 * for pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 *
 * This lets us distinguish between softirq processing being in progress
 * and bh being merely disabled.
 */
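/*
 * Worked example of the !RT encoding, assuming the usual values from
 * <linux/preempt.h> (SOFTIRQ_OFFSET == 0x100, SOFTIRQ_DISABLE_OFFSET ==
 * 0x200); this is illustration, not a definition:
 *
 *	local_bh_disable()	-> softirq bits of preempt_count() == 0x200
 *				   in_softirq() true, in_serving_softirq() false
 *	__do_softirq() running	-> softirq bits include 0x100
 *				   in_serving_softirq() true
 *
 * So "BH merely disabled" and "softirq handler executing" are told apart
 * by which offsets are present in the count.
 */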
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check from the idle task whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() and friends work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

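/*
 * Hedged usage sketch (illustrative, the per-CPU variable is made up): code
 * that shares per-CPU state with a softirq handler brackets its access with
 * a BH disabled section. On !RT this just bumps the preempt counter; on RT
 * it takes the per CPU softirq_ctrl lock above, so the section is preemptible
 * but still excludes the local softirq handlers:
 *
 *	local_bh_disable();
 *	this_cpu_ptr(&my_stats)->packets++;	(safe against NET_RX etc.)
 *	local_bh_enable();
 *
 * local_bh_enable() also runs any softirqs that were raised while the
 * section was held, either inline or via ksoftirqd as decided above.
 */
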
/*
 * Invoked from run_ksoftirqd() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

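/*
 * Rough sketch of the intended caller (kernel/smp.c), to show where
 * was_pending comes from; details may differ from the actual code:
 *
 *	was_pending = local_softirq_pending();
 *	__flush_smp_call_function_queue(true);
 *	do_softirq_post_smp_call_flush(was_pending);
 */
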
#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirqs if any are pending, and do it on a separate
		 * stack, as we may already be deep in a task call stack here.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * may already be deep. So run softirqs on their own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
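/*
 * Worked example of the time budget (illustrative, assuming the standard
 * msecs_to_jiffies() rounding): with HZ=1000, MAX_SOFTIRQ_TIME is 2 jiffies
 * (2 ms); with HZ=250 it rounds up to one 4 ms jiffy. Either way
 * __do_softirq() bails out to ksoftirqd once the deadline has passed, after
 * 10 restarts, or as soon as need_resched() is set, whichever comes first.
 */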

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    __this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}

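/*
 * Hedged sketch of how architecture interrupt entry is expected to bracket a
 * handler with the helpers above (simplified; most architectures go through
 * the generic entry code or irq domain helpers rather than open-coding this):
 *
 *	irq_enter();
 *	generic_handle_domain_irq(domain, hwirq);
 *	irq_exit();
 *
 * irq_exit() is where pending softirqs raised by the handler get run, via
 * __irq_exit_rcu() below.
 */
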
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	 /* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	 /* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

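/*
 * Hedged usage sketch (the driver names are made up): the modern interface is
 * tasklet_setup() with a callback taking the tasklet itself, the containing
 * object being recovered via from_tasklet(). tasklet_init() is the older
 * unsigned-long-data variant kept for existing users:
 *
 *	struct my_dev { struct tasklet_struct bh; ... };
 *
 *	static void my_dev_bh(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, bh);
 *		... bottom half work ...
 *	}
 *
 *	tasklet_setup(&dev->bh, my_dev_bh);	(probe)
 *	tasklet_schedule(&dev->bh);		(from the hard IRQ handler)
 *	tasklet_kill(&dev->bh);			(teardown, after the IRQ is quiesced)
 */
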
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a livelock if the current task has
			 * preempted softirq processing or is preventing
			 * ksoftirqd from running. If the tasklet runs on a
			 * different CPU then this has no effect other than
			 * doing the BH disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

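/*
 * Note on the smpboot contract (descriptive only, see kernel/smpboot.c for
 * the authoritative behaviour): registering this descriptor spawns one
 * "ksoftirqd/N" thread per CPU. Each thread repeatedly calls
 * ->thread_should_run() with preemption disabled and, whenever it returns
 * true, runs ->thread_fn() once, so run_ksoftirqd() handles one batch of
 * pending softirqs per wakeup.
 */
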
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}