xref: /openbmc/linux/kernel/softirq.c (revision 9144f784f852f9a125cabe9927b986d909bfa439)
1767a67b0SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *	linux/kernel/softirq.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *	Copyright (C) 1992 Linus Torvalds
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
81da177e4SLinus Torvalds  */
91da177e4SLinus Torvalds 
1040322764SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1140322764SJoe Perches 
129984de1aSPaul Gortmaker #include <linux/export.h>
131da177e4SLinus Torvalds #include <linux/kernel_stat.h>
141da177e4SLinus Torvalds #include <linux/interrupt.h>
151da177e4SLinus Torvalds #include <linux/init.h>
168b1c04acSThomas Gleixner #include <linux/local_lock.h>
171da177e4SLinus Torvalds #include <linux/mm.h>
181da177e4SLinus Torvalds #include <linux/notifier.h>
191da177e4SLinus Torvalds #include <linux/percpu.h>
201da177e4SLinus Torvalds #include <linux/cpu.h>
2183144186SRafael J. Wysocki #include <linux/freezer.h>
221da177e4SLinus Torvalds #include <linux/kthread.h>
231da177e4SLinus Torvalds #include <linux/rcupdate.h>
247e49fcceSSteven Rostedt #include <linux/ftrace.h>
2578eef01bSAndrew Morton #include <linux/smp.h>
263e339b5dSThomas Gleixner #include <linux/smpboot.h>
2779bf2bb3SThomas Gleixner #include <linux/tick.h>
28d532676cSThomas Gleixner #include <linux/irq.h>
29da044747SPeter Zijlstra #include <linux/wait_bit.h>
30a0e39ed3SHeiko Carstens 
31db1cc7aeSThomas Gleixner #include <asm/softirq_stack.h>
32db1cc7aeSThomas Gleixner 
33a0e39ed3SHeiko Carstens #define CREATE_TRACE_POINTS
34ad8d75ffSSteven Rostedt #include <trace/events/irq.h>
351da177e4SLinus Torvalds 
361da177e4SLinus Torvalds /*
371da177e4SLinus Torvalds    - No shared variables, all the data are CPU local.
381da177e4SLinus Torvalds    - If a softirq needs serialization, let it serialize itself
391da177e4SLinus Torvalds      by its own spinlocks.
401da177e4SLinus Torvalds    - Even if a softirq is serialized, only the local CPU is marked for
411da177e4SLinus Torvalds      execution. Hence, we get something like a weak CPU binding.
421da177e4SLinus Torvalds      Though it is still not clear whether it will result in better
431da177e4SLinus Torvalds      locality or not.
441da177e4SLinus Torvalds 
451da177e4SLinus Torvalds    Examples:
461da177e4SLinus Torvalds    - NET RX softirq. It is multithreaded and does not require
471da177e4SLinus Torvalds      any global serialization.
481da177e4SLinus Torvalds    - NET TX softirq. It kicks software netdevice queues, hence
491da177e4SLinus Torvalds      it is logically serialized per device, but this serialization
501da177e4SLinus Torvalds      is invisible to common code.
511da177e4SLinus Torvalds    - Tasklets: each tasklet is serialized with respect to itself.
521da177e4SLinus Torvalds  */
531da177e4SLinus Torvalds 
541da177e4SLinus Torvalds #ifndef __ARCH_IRQ_STAT
550f6f47baSFrederic Weisbecker DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
560f6f47baSFrederic Weisbecker EXPORT_PER_CPU_SYMBOL(irq_stat);
571da177e4SLinus Torvalds #endif
581da177e4SLinus Torvalds 
59978b0116SAlexey Dobriyan static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
601da177e4SLinus Torvalds 
614dd53d89SVenkatesh Pallipadi DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
621da177e4SLinus Torvalds 
63ce85b4f2SJoe Perches const char * const softirq_to_name[NR_SOFTIRQS] = {
64f660f606SSagi Grimberg 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
6509223371SShaohua Li 	"TASKLET", "SCHED", "HRTIMER", "RCU"
665d592b44SJason Baron };
675d592b44SJason Baron 
681da177e4SLinus Torvalds /*
691da177e4SLinus Torvalds  * we cannot loop indefinitely here to avoid userspace starvation,
701da177e4SLinus Torvalds  * but we also don't want to introduce a worst case 1/HZ latency
711da177e4SLinus Torvalds  * to the pending events, so let the scheduler balance
721da177e4SLinus Torvalds  * the softirq load for us.
731da177e4SLinus Torvalds  */
74676cb02dSThomas Gleixner static void wakeup_softirqd(void)
751da177e4SLinus Torvalds {
761da177e4SLinus Torvalds 	/* Interrupts are disabled: no need to stop preemption */
77909ea964SChristoph Lameter 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
781da177e4SLinus Torvalds 
7937aadc68SPeter Zijlstra 	if (tsk)
801da177e4SLinus Torvalds 		wake_up_process(tsk);
811da177e4SLinus Torvalds }
821da177e4SLinus Torvalds 
83ae9ef589SThomas Gleixner #ifdef CONFIG_TRACE_IRQFLAGS
84ae9ef589SThomas Gleixner DEFINE_PER_CPU(int, hardirqs_enabled);
85ae9ef589SThomas Gleixner DEFINE_PER_CPU(int, hardirq_context);
86ae9ef589SThomas Gleixner EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
87ae9ef589SThomas Gleixner EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
88ae9ef589SThomas Gleixner #endif
89ae9ef589SThomas Gleixner 
904cd13c21SEric Dumazet /*
918b1c04acSThomas Gleixner  * SOFTIRQ_OFFSET usage:
928b1c04acSThomas Gleixner  *
938b1c04acSThomas Gleixner  * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
948b1c04acSThomas Gleixner  *   to a per CPU counter and to task::softirq_disable_cnt.
958b1c04acSThomas Gleixner  *
968b1c04acSThomas Gleixner  * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
978b1c04acSThomas Gleixner  *   processing.
988b1c04acSThomas Gleixner  *
998b1c04acSThomas Gleixner  * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
10075e1056fSVenkatesh Pallipadi  *   on local_bh_disable or local_bh_enable.
1018b1c04acSThomas Gleixner  *
10275e1056fSVenkatesh Pallipadi  * This lets us distinguish between whether we are currently processing
10375e1056fSVenkatesh Pallipadi  * softirq and whether we just have bh disabled.
10475e1056fSVenkatesh Pallipadi  */
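/*
 * Editor's illustration of the counting scheme above (not part of the
 * original file; assumes a !RT kernel where the offsets nest in the
 * preempt counter):
 *
 *	local_bh_disable();		// count += SOFTIRQ_DISABLE_OFFSET
 *	WARN_ON(!in_softirq());		// softirq_count() is non-zero
 *	WARN_ON(in_serving_softirq());	// but we are not running a softirq
 *	local_bh_enable();		// count -= SOFTIRQ_DISABLE_OFFSET
 *
 * Softirq processing itself adds only SOFTIRQ_OFFSET, which is exactly what
 * makes in_serving_softirq() return true.
 */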
1058b1c04acSThomas Gleixner #ifdef CONFIG_PREEMPT_RT
10675e1056fSVenkatesh Pallipadi 
107ae9ef589SThomas Gleixner /*
1088b1c04acSThomas Gleixner  * RT accounts for BH disabled sections in task::softirq_disable_cnt and
1098b1c04acSThomas Gleixner  * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
1108b1c04acSThomas Gleixner  * softirq disabled section to be preempted.
1118b1c04acSThomas Gleixner  *
1128b1c04acSThomas Gleixner  * The per task counter is used for softirq_count(), in_softirq() and
1138b1c04acSThomas Gleixner  * in_serving_softirq() because these counts are only valid when the task
1148b1c04acSThomas Gleixner  * holding softirq_ctrl::lock is running.
1158b1c04acSThomas Gleixner  *
1168b1c04acSThomas Gleixner  * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
1178b1c04acSThomas Gleixner  * the task which is in a softirq disabled section is preempted or blocks.
1188b1c04acSThomas Gleixner  */
1198b1c04acSThomas Gleixner struct softirq_ctrl {
1208b1c04acSThomas Gleixner 	local_lock_t	lock;
1218b1c04acSThomas Gleixner 	int		cnt;
1228b1c04acSThomas Gleixner };
1238b1c04acSThomas Gleixner 
1248b1c04acSThomas Gleixner static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
1258b1c04acSThomas Gleixner 	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
1268b1c04acSThomas Gleixner };
1278b1c04acSThomas Gleixner 
12847c218dcSThomas Gleixner /**
12947c218dcSThomas Gleixner  * local_bh_blocked() - Check from the idle task whether BH processing is blocked
13047c218dcSThomas Gleixner  *
13147c218dcSThomas Gleixner  * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
13247c218dcSThomas Gleixner  *
13347c218dcSThomas Gleixner  * This is invoked from the idle task to guard against false positive
13447c218dcSThomas Gleixner  * softirq pending warnings, which would happen when the task which holds
13547c218dcSThomas Gleixner  * softirq_ctrl::lock was the only running task on the CPU and blocks on
13647c218dcSThomas Gleixner  * some other lock.
13747c218dcSThomas Gleixner  */
13847c218dcSThomas Gleixner bool local_bh_blocked(void)
13947c218dcSThomas Gleixner {
14047c218dcSThomas Gleixner 	return __this_cpu_read(softirq_ctrl.cnt) != 0;
14147c218dcSThomas Gleixner }
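/*
 * Editor's sketch of the intended caller (hypothetical, loosely modelled on
 * the NOHZ idle path): the idle code only warns about pending softirqs when
 * BH processing is not blocked by a preempted softirq-disabled section.
 *
 *	if (local_softirq_pending() && !local_bh_blocked())
 *		pr_warn_once("softirqs pending while entering idle\n");
 */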
14247c218dcSThomas Gleixner 
1438b1c04acSThomas Gleixner void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
1448b1c04acSThomas Gleixner {
1458b1c04acSThomas Gleixner 	unsigned long flags;
1468b1c04acSThomas Gleixner 	int newcnt;
1478b1c04acSThomas Gleixner 
1488b1c04acSThomas Gleixner 	WARN_ON_ONCE(in_hardirq());
1498b1c04acSThomas Gleixner 
1508b1c04acSThomas Gleixner 	/* First entry of a task into a BH disabled section? */
1518b1c04acSThomas Gleixner 	if (!current->softirq_disable_cnt) {
1528b1c04acSThomas Gleixner 		if (preemptible()) {
1538b1c04acSThomas Gleixner 			local_lock(&softirq_ctrl.lock);
1548b1c04acSThomas Gleixner 			/* Required to meet the RCU bottomhalf requirements. */
1558b1c04acSThomas Gleixner 			rcu_read_lock();
1568b1c04acSThomas Gleixner 		} else {
1578b1c04acSThomas Gleixner 			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
1588b1c04acSThomas Gleixner 		}
1598b1c04acSThomas Gleixner 	}
1608b1c04acSThomas Gleixner 
1618b1c04acSThomas Gleixner 	/*
1628b1c04acSThomas Gleixner 	 * Track the per CPU softirq disabled state. On RT this is per CPU
1638b1c04acSThomas Gleixner 	 * state to allow preemption of bottom half disabled sections.
1648b1c04acSThomas Gleixner 	 */
1658b1c04acSThomas Gleixner 	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
1668b1c04acSThomas Gleixner 	/*
1678b1c04acSThomas Gleixner 	 * Reflect the result in the task state to prevent recursion on the
1688b1c04acSThomas Gleixner 	 * local lock and to make softirq_count() & al work.
1698b1c04acSThomas Gleixner 	 */
1708b1c04acSThomas Gleixner 	current->softirq_disable_cnt = newcnt;
1718b1c04acSThomas Gleixner 
1728b1c04acSThomas Gleixner 	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
1738b1c04acSThomas Gleixner 		raw_local_irq_save(flags);
1748b1c04acSThomas Gleixner 		lockdep_softirqs_off(ip);
1758b1c04acSThomas Gleixner 		raw_local_irq_restore(flags);
1768b1c04acSThomas Gleixner 	}
1778b1c04acSThomas Gleixner }
1788b1c04acSThomas Gleixner EXPORT_SYMBOL(__local_bh_disable_ip);
1798b1c04acSThomas Gleixner 
1808b1c04acSThomas Gleixner static void __local_bh_enable(unsigned int cnt, bool unlock)
1818b1c04acSThomas Gleixner {
1828b1c04acSThomas Gleixner 	unsigned long flags;
1838b1c04acSThomas Gleixner 	int newcnt;
1848b1c04acSThomas Gleixner 
1858b1c04acSThomas Gleixner 	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
1868b1c04acSThomas Gleixner 			    this_cpu_read(softirq_ctrl.cnt));
1878b1c04acSThomas Gleixner 
1888b1c04acSThomas Gleixner 	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
1898b1c04acSThomas Gleixner 		raw_local_irq_save(flags);
1908b1c04acSThomas Gleixner 		lockdep_softirqs_on(_RET_IP_);
1918b1c04acSThomas Gleixner 		raw_local_irq_restore(flags);
1928b1c04acSThomas Gleixner 	}
1938b1c04acSThomas Gleixner 
1948b1c04acSThomas Gleixner 	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
1958b1c04acSThomas Gleixner 	current->softirq_disable_cnt = newcnt;
1968b1c04acSThomas Gleixner 
1978b1c04acSThomas Gleixner 	if (!newcnt && unlock) {
1988b1c04acSThomas Gleixner 		rcu_read_unlock();
1998b1c04acSThomas Gleixner 		local_unlock(&softirq_ctrl.lock);
2008b1c04acSThomas Gleixner 	}
2018b1c04acSThomas Gleixner }
2028b1c04acSThomas Gleixner 
2038b1c04acSThomas Gleixner void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
2048b1c04acSThomas Gleixner {
2058b1c04acSThomas Gleixner 	bool preempt_on = preemptible();
2068b1c04acSThomas Gleixner 	unsigned long flags;
2078b1c04acSThomas Gleixner 	u32 pending;
2088b1c04acSThomas Gleixner 	int curcnt;
2098b1c04acSThomas Gleixner 
210fe13889cSChangbin Du 	WARN_ON_ONCE(in_hardirq());
2118b1c04acSThomas Gleixner 	lockdep_assert_irqs_enabled();
2128b1c04acSThomas Gleixner 
2138b1c04acSThomas Gleixner 	local_irq_save(flags);
2148b1c04acSThomas Gleixner 	curcnt = __this_cpu_read(softirq_ctrl.cnt);
2158b1c04acSThomas Gleixner 
2168b1c04acSThomas Gleixner 	/*
2178b1c04acSThomas Gleixner 	 * If this is not reenabling soft interrupts, no point in trying to
2188b1c04acSThomas Gleixner 	 * run pending ones.
2198b1c04acSThomas Gleixner 	 */
2208b1c04acSThomas Gleixner 	if (curcnt != cnt)
2218b1c04acSThomas Gleixner 		goto out;
2228b1c04acSThomas Gleixner 
2238b1c04acSThomas Gleixner 	pending = local_softirq_pending();
224d15121beSPaolo Abeni 	if (!pending)
2258b1c04acSThomas Gleixner 		goto out;
2268b1c04acSThomas Gleixner 
2278b1c04acSThomas Gleixner 	/*
2288b1c04acSThomas Gleixner 	 * If this was called from non preemptible context, wake up the
2298b1c04acSThomas Gleixner 	 * softirq daemon.
2308b1c04acSThomas Gleixner 	 */
2318b1c04acSThomas Gleixner 	if (!preempt_on) {
2328b1c04acSThomas Gleixner 		wakeup_softirqd();
2338b1c04acSThomas Gleixner 		goto out;
2348b1c04acSThomas Gleixner 	}
2358b1c04acSThomas Gleixner 
2368b1c04acSThomas Gleixner 	/*
2378b1c04acSThomas Gleixner 	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
2388b1c04acSThomas Gleixner 	 * in_serving_softirq() become true.
2398b1c04acSThomas Gleixner 	 */
2408b1c04acSThomas Gleixner 	cnt = SOFTIRQ_OFFSET;
2418b1c04acSThomas Gleixner 	__local_bh_enable(cnt, false);
2428b1c04acSThomas Gleixner 	__do_softirq();
2438b1c04acSThomas Gleixner 
2448b1c04acSThomas Gleixner out:
2458b1c04acSThomas Gleixner 	__local_bh_enable(cnt, preempt_on);
2468b1c04acSThomas Gleixner 	local_irq_restore(flags);
2478b1c04acSThomas Gleixner }
2488b1c04acSThomas Gleixner EXPORT_SYMBOL(__local_bh_enable_ip);
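/*
 * Editor's example (hypothetical driver code, not from this file): these
 * primitives back the usual local_bh_disable()/local_bh_enable() pair,
 * e.g. to keep per-CPU state consistent against the local softirq handler:
 *
 *	local_bh_disable();
 *	stats = this_cpu_ptr(&my_pcpu_stats);	// 'my_pcpu_stats' is made up
 *	stats->rx_packets++;
 *	local_bh_enable();			// may run pending softirqs
 *
 * On PREEMPT_RT such a section stays preemptible, but it is serialized
 * against softirq processing on the same CPU via softirq_ctrl::lock.
 */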
2498b1c04acSThomas Gleixner 
2508b1c04acSThomas Gleixner /*
2518b1c04acSThomas Gleixner  * Invoked from run_ksoftirqd() outside of the interrupt disabled section
2528b1c04acSThomas Gleixner  * to acquire the per CPU local lock for reentrancy protection.
2538b1c04acSThomas Gleixner  */
2548b1c04acSThomas Gleixner static inline void ksoftirqd_run_begin(void)
2558b1c04acSThomas Gleixner {
2568b1c04acSThomas Gleixner 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
2578b1c04acSThomas Gleixner 	local_irq_disable();
2588b1c04acSThomas Gleixner }
2598b1c04acSThomas Gleixner 
2608b1c04acSThomas Gleixner /* Counterpart to ksoftirqd_run_begin() */
2618b1c04acSThomas Gleixner static inline void ksoftirqd_run_end(void)
2628b1c04acSThomas Gleixner {
2638b1c04acSThomas Gleixner 	__local_bh_enable(SOFTIRQ_OFFSET, true);
2648b1c04acSThomas Gleixner 	WARN_ON_ONCE(in_interrupt());
2658b1c04acSThomas Gleixner 	local_irq_enable();
2668b1c04acSThomas Gleixner }
2678b1c04acSThomas Gleixner 
2688b1c04acSThomas Gleixner static inline void softirq_handle_begin(void) { }
2698b1c04acSThomas Gleixner static inline void softirq_handle_end(void) { }
2708b1c04acSThomas Gleixner 
2718b1c04acSThomas Gleixner static inline bool should_wake_ksoftirqd(void)
2728b1c04acSThomas Gleixner {
2738b1c04acSThomas Gleixner 	return !this_cpu_read(softirq_ctrl.cnt);
2748b1c04acSThomas Gleixner }
2758b1c04acSThomas Gleixner 
2768b1c04acSThomas Gleixner static inline void invoke_softirq(void)
2778b1c04acSThomas Gleixner {
2788b1c04acSThomas Gleixner 	if (should_wake_ksoftirqd())
2798b1c04acSThomas Gleixner 		wakeup_softirqd();
2808b1c04acSThomas Gleixner }
2818b1c04acSThomas Gleixner 
282*3dd65ffaSK Prateek Nayak #define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)
283*3dd65ffaSK Prateek Nayak 
2841a90bfd2SSebastian Andrzej Siewior /*
2851a90bfd2SSebastian Andrzej Siewior  * flush_smp_call_function_queue() can raise a soft interrupt in a function
286*3dd65ffaSK Prateek Nayak  * call. On RT kernels this is undesired, and the only known users are the
287*3dd65ffaSK Prateek Nayak  * block layer, which is disabled on RT, and the scheduler's idle load
288*3dd65ffaSK Prateek Nayak  * balancing. If soft interrupts get raised which haven't been raised before
289*3dd65ffaSK Prateek Nayak  * the flush, warn if it is not a SCHED_SOFTIRQ so it can be
2901a90bfd2SSebastian Andrzej Siewior  * investigated.
2911a90bfd2SSebastian Andrzej Siewior  */
2921a90bfd2SSebastian Andrzej Siewior void do_softirq_post_smp_call_flush(unsigned int was_pending)
2931a90bfd2SSebastian Andrzej Siewior {
294*3dd65ffaSK Prateek Nayak 	unsigned int is_pending = local_softirq_pending();
295*3dd65ffaSK Prateek Nayak 
296*3dd65ffaSK Prateek Nayak 	if (unlikely(was_pending != is_pending)) {
297*3dd65ffaSK Prateek Nayak 		WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
2981a90bfd2SSebastian Andrzej Siewior 		invoke_softirq();
2991a90bfd2SSebastian Andrzej Siewior 	}
300*3dd65ffaSK Prateek Nayak }
3011a90bfd2SSebastian Andrzej Siewior 
3028b1c04acSThomas Gleixner #else /* CONFIG_PREEMPT_RT */
3038b1c04acSThomas Gleixner 
3048b1c04acSThomas Gleixner /*
3058b1c04acSThomas Gleixner  * This one is for softirq.c-internal use, where hardirqs are disabled
306ae9ef589SThomas Gleixner  * legitimately:
307ae9ef589SThomas Gleixner  */
3088b1c04acSThomas Gleixner #ifdef CONFIG_TRACE_IRQFLAGS
3090bd3a173SPeter Zijlstra void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
310de30a2b3SIngo Molnar {
311de30a2b3SIngo Molnar 	unsigned long flags;
312de30a2b3SIngo Molnar 
313fe13889cSChangbin Du 	WARN_ON_ONCE(in_hardirq());
314de30a2b3SIngo Molnar 
315de30a2b3SIngo Molnar 	raw_local_irq_save(flags);
3167e49fcceSSteven Rostedt 	/*
317bdb43806SPeter Zijlstra 	 * The preempt tracer hooks into preempt_count_add and will break
3187e49fcceSSteven Rostedt 	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
3197e49fcceSSteven Rostedt 	 * is set and before current->softirqs_enabled is cleared.
3207e49fcceSSteven Rostedt 	 * We must manually increment preempt_count here and manually
3217e49fcceSSteven Rostedt 	 * call the trace_preempt_off later.
3227e49fcceSSteven Rostedt 	 */
323bdb43806SPeter Zijlstra 	__preempt_count_add(cnt);
324de30a2b3SIngo Molnar 	/*
325de30a2b3SIngo Molnar 	 * Were softirqs turned off above:
326de30a2b3SIngo Molnar 	 */
3279ea4c380SPeter Zijlstra 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
3280d38453cSPeter Zijlstra 		lockdep_softirqs_off(ip);
329de30a2b3SIngo Molnar 	raw_local_irq_restore(flags);
3307e49fcceSSteven Rostedt 
3310f1ba9a2SHeiko Carstens 	if (preempt_count() == cnt) {
3320f1ba9a2SHeiko Carstens #ifdef CONFIG_DEBUG_PREEMPT
333f904f582SSebastian Andrzej Siewior 		current->preempt_disable_ip = get_lock_parent_ip();
3340f1ba9a2SHeiko Carstens #endif
335f904f582SSebastian Andrzej Siewior 		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
336de30a2b3SIngo Molnar 	}
3370f1ba9a2SHeiko Carstens }
3380bd3a173SPeter Zijlstra EXPORT_SYMBOL(__local_bh_disable_ip);
3393c829c36STim Chen #endif /* CONFIG_TRACE_IRQFLAGS */
340de30a2b3SIngo Molnar 
34175e1056fSVenkatesh Pallipadi static void __local_bh_enable(unsigned int cnt)
34275e1056fSVenkatesh Pallipadi {
343f71b74bcSFrederic Weisbecker 	lockdep_assert_irqs_disabled();
34475e1056fSVenkatesh Pallipadi 
3451a63dcd8SJoel Fernandes (Google) 	if (preempt_count() == cnt)
3461a63dcd8SJoel Fernandes (Google) 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
3471a63dcd8SJoel Fernandes (Google) 
3489ea4c380SPeter Zijlstra 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
3490d38453cSPeter Zijlstra 		lockdep_softirqs_on(_RET_IP_);
3501a63dcd8SJoel Fernandes (Google) 
3511a63dcd8SJoel Fernandes (Google) 	__preempt_count_sub(cnt);
35275e1056fSVenkatesh Pallipadi }
35375e1056fSVenkatesh Pallipadi 
354de30a2b3SIngo Molnar /*
355c3442697SPaul E. McKenney  * Special-case - softirqs can safely be enabled by __do_softirq(),
356de30a2b3SIngo Molnar  * without processing still-pending softirqs:
357de30a2b3SIngo Molnar  */
358de30a2b3SIngo Molnar void _local_bh_enable(void)
359de30a2b3SIngo Molnar {
360fe13889cSChangbin Du 	WARN_ON_ONCE(in_hardirq());
36175e1056fSVenkatesh Pallipadi 	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
362de30a2b3SIngo Molnar }
363de30a2b3SIngo Molnar EXPORT_SYMBOL(_local_bh_enable);
364de30a2b3SIngo Molnar 
3650bd3a173SPeter Zijlstra void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
366de30a2b3SIngo Molnar {
367fe13889cSChangbin Du 	WARN_ON_ONCE(in_hardirq());
368f71b74bcSFrederic Weisbecker 	lockdep_assert_irqs_enabled();
3693c829c36STim Chen #ifdef CONFIG_TRACE_IRQFLAGS
3700f476b6dSJohannes Berg 	local_irq_disable();
3713c829c36STim Chen #endif
372de30a2b3SIngo Molnar 	/*
373de30a2b3SIngo Molnar 	 * Are softirqs going to be turned on now:
374de30a2b3SIngo Molnar 	 */
37575e1056fSVenkatesh Pallipadi 	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
3760d38453cSPeter Zijlstra 		lockdep_softirqs_on(ip);
377de30a2b3SIngo Molnar 	/*
378de30a2b3SIngo Molnar 	 * Keep preemption disabled until we are done with
379de30a2b3SIngo Molnar 	 * softirq processing:
380de30a2b3SIngo Molnar 	 */
38191ea62d5SPeter Zijlstra 	__preempt_count_sub(cnt - 1);
382de30a2b3SIngo Molnar 
3830bed698aSFrederic Weisbecker 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
3840bed698aSFrederic Weisbecker 		/*
3850bed698aSFrederic Weisbecker 		 * Run softirq if any pending. And do it in its own stack
3860bed698aSFrederic Weisbecker 		 * as we may be calling this deep in a task call stack already.
3870bed698aSFrederic Weisbecker 		 */
388de30a2b3SIngo Molnar 		do_softirq();
3890bed698aSFrederic Weisbecker 	}
390de30a2b3SIngo Molnar 
391bdb43806SPeter Zijlstra 	preempt_count_dec();
3923c829c36STim Chen #ifdef CONFIG_TRACE_IRQFLAGS
3930f476b6dSJohannes Berg 	local_irq_enable();
3943c829c36STim Chen #endif
395de30a2b3SIngo Molnar 	preempt_check_resched();
396de30a2b3SIngo Molnar }
3970bd3a173SPeter Zijlstra EXPORT_SYMBOL(__local_bh_enable_ip);
398de30a2b3SIngo Molnar 
399f02fc963SThomas Gleixner static inline void softirq_handle_begin(void)
400f02fc963SThomas Gleixner {
401f02fc963SThomas Gleixner 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
402f02fc963SThomas Gleixner }
403f02fc963SThomas Gleixner 
404f02fc963SThomas Gleixner static inline void softirq_handle_end(void)
405f02fc963SThomas Gleixner {
406f02fc963SThomas Gleixner 	__local_bh_enable(SOFTIRQ_OFFSET);
407f02fc963SThomas Gleixner 	WARN_ON_ONCE(in_interrupt());
408f02fc963SThomas Gleixner }
409f02fc963SThomas Gleixner 
410f02fc963SThomas Gleixner static inline void ksoftirqd_run_begin(void)
411f02fc963SThomas Gleixner {
412f02fc963SThomas Gleixner 	local_irq_disable();
413f02fc963SThomas Gleixner }
414f02fc963SThomas Gleixner 
415f02fc963SThomas Gleixner static inline void ksoftirqd_run_end(void)
416f02fc963SThomas Gleixner {
417f02fc963SThomas Gleixner 	local_irq_enable();
418f02fc963SThomas Gleixner }
419f02fc963SThomas Gleixner 
420f02fc963SThomas Gleixner static inline bool should_wake_ksoftirqd(void)
421f02fc963SThomas Gleixner {
422f02fc963SThomas Gleixner 	return true;
423f02fc963SThomas Gleixner }
424f02fc963SThomas Gleixner 
425ae9ef589SThomas Gleixner static inline void invoke_softirq(void)
426ae9ef589SThomas Gleixner {
42791cc470eSTanner Love 	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
428ae9ef589SThomas Gleixner #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
429ae9ef589SThomas Gleixner 		/*
430ae9ef589SThomas Gleixner 		 * We can safely execute softirq on the current stack if
431ae9ef589SThomas Gleixner 		 * it is the irq stack, because it should be near empty
432ae9ef589SThomas Gleixner 		 * at this stage.
433ae9ef589SThomas Gleixner 		 */
434ae9ef589SThomas Gleixner 		__do_softirq();
435ae9ef589SThomas Gleixner #else
436ae9ef589SThomas Gleixner 		/*
437ae9ef589SThomas Gleixner 		 * Otherwise, irq_exit() is called on the task stack, which can
438ae9ef589SThomas Gleixner 		 * potentially be deep already. So run softirqs on their own stack
439ae9ef589SThomas Gleixner 		 * to prevent any overrun.
440ae9ef589SThomas Gleixner 		 */
441ae9ef589SThomas Gleixner 		do_softirq_own_stack();
442ae9ef589SThomas Gleixner #endif
443ae9ef589SThomas Gleixner 	} else {
444ae9ef589SThomas Gleixner 		wakeup_softirqd();
445ae9ef589SThomas Gleixner 	}
446ae9ef589SThomas Gleixner }
447ae9ef589SThomas Gleixner 
448ae9ef589SThomas Gleixner asmlinkage __visible void do_softirq(void)
449ae9ef589SThomas Gleixner {
450ae9ef589SThomas Gleixner 	__u32 pending;
451ae9ef589SThomas Gleixner 	unsigned long flags;
452ae9ef589SThomas Gleixner 
453ae9ef589SThomas Gleixner 	if (in_interrupt())
454ae9ef589SThomas Gleixner 		return;
455ae9ef589SThomas Gleixner 
456ae9ef589SThomas Gleixner 	local_irq_save(flags);
457ae9ef589SThomas Gleixner 
458ae9ef589SThomas Gleixner 	pending = local_softirq_pending();
459ae9ef589SThomas Gleixner 
460d15121beSPaolo Abeni 	if (pending)
461ae9ef589SThomas Gleixner 		do_softirq_own_stack();
462ae9ef589SThomas Gleixner 
463ae9ef589SThomas Gleixner 	local_irq_restore(flags);
464ae9ef589SThomas Gleixner }
465ae9ef589SThomas Gleixner 
4668b1c04acSThomas Gleixner #endif /* !CONFIG_PREEMPT_RT */
4678b1c04acSThomas Gleixner 
468de30a2b3SIngo Molnar /*
46934376a50SBen Greear  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
47034376a50SBen Greear  * but break the loop if need_resched() is set or after 2 ms.
47134376a50SBen Greear  * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
47234376a50SBen Greear  * certain cases, such as stop_machine(), jiffies may cease to
47334376a50SBen Greear  * increment and so we need the MAX_SOFTIRQ_RESTART limit as
47434376a50SBen Greear  * well to make sure we eventually return from this method.
4751da177e4SLinus Torvalds  *
476c10d7367SEric Dumazet  * These limits have been established via experimentation.
4771da177e4SLinus Torvalds  * The two things to balance are latency and fairness -
4781da177e4SLinus Torvalds  * we want to handle softirqs as soon as possible, but they
4791da177e4SLinus Torvalds  * should not be able to lock up the box.
4801da177e4SLinus Torvalds  */
481c10d7367SEric Dumazet #define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
48234376a50SBen Greear #define MAX_SOFTIRQ_RESTART 10
4831da177e4SLinus Torvalds 
484f1a83e65SPeter Zijlstra #ifdef CONFIG_TRACE_IRQFLAGS
485f1a83e65SPeter Zijlstra /*
486f1a83e65SPeter Zijlstra  * When we run softirqs from irq_exit() and thus on the hardirq stack we need
487f1a83e65SPeter Zijlstra  * to keep the lockdep irq context tracking as tight as possible in order to
488f1a83e65SPeter Zijlstra  * not mis-qualify lock contexts and miss possible deadlocks.
489f1a83e65SPeter Zijlstra  */
490f1a83e65SPeter Zijlstra 
4915c4853b6SFrederic Weisbecker static inline bool lockdep_softirq_start(void)
492f1a83e65SPeter Zijlstra {
4935c4853b6SFrederic Weisbecker 	bool in_hardirq = false;
494f1a83e65SPeter Zijlstra 
495f9ad4a5fSPeter Zijlstra 	if (lockdep_hardirq_context()) {
4965c4853b6SFrederic Weisbecker 		in_hardirq = true;
4972502ec37SThomas Gleixner 		lockdep_hardirq_exit();
498f1a83e65SPeter Zijlstra 	}
499f1a83e65SPeter Zijlstra 
5005c4853b6SFrederic Weisbecker 	lockdep_softirq_enter();
5015c4853b6SFrederic Weisbecker 
5025c4853b6SFrederic Weisbecker 	return in_hardirq;
5035c4853b6SFrederic Weisbecker }
5045c4853b6SFrederic Weisbecker 
5055c4853b6SFrederic Weisbecker static inline void lockdep_softirq_end(bool in_hardirq)
506f1a83e65SPeter Zijlstra {
507f1a83e65SPeter Zijlstra 	lockdep_softirq_exit();
5085c4853b6SFrederic Weisbecker 
5095c4853b6SFrederic Weisbecker 	if (in_hardirq)
5102502ec37SThomas Gleixner 		lockdep_hardirq_enter();
511f1a83e65SPeter Zijlstra }
512f1a83e65SPeter Zijlstra #else
5135c4853b6SFrederic Weisbecker static inline bool lockdep_softirq_start(void) { return false; }
5145c4853b6SFrederic Weisbecker static inline void lockdep_softirq_end(bool in_hardirq) { }
515f1a83e65SPeter Zijlstra #endif
516f1a83e65SPeter Zijlstra 
5173a83d0d2SZqiang static void handle_softirqs(bool ksirqd)
5181da177e4SLinus Torvalds {
519c10d7367SEric Dumazet 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
520907aed48SMel Gorman 	unsigned long old_flags = current->flags;
52134376a50SBen Greear 	int max_restart = MAX_SOFTIRQ_RESTART;
522f1a83e65SPeter Zijlstra 	struct softirq_action *h;
5235c4853b6SFrederic Weisbecker 	bool in_hardirq;
524f1a83e65SPeter Zijlstra 	__u32 pending;
5252e702b9fSJoe Perches 	int softirq_bit;
526907aed48SMel Gorman 
527907aed48SMel Gorman 	/*
528e45506acSYangtao Li 	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
529e45506acSYangtao Li 	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
530e45506acSYangtao Li 	 * again if the socket is related to swapping.
531907aed48SMel Gorman 	 */
532907aed48SMel Gorman 	current->flags &= ~PF_MEMALLOC;
5331da177e4SLinus Torvalds 
5341da177e4SLinus Torvalds 	pending = local_softirq_pending();
535829035fdSPaul Mackerras 
536f02fc963SThomas Gleixner 	softirq_handle_begin();
5375c4853b6SFrederic Weisbecker 	in_hardirq = lockdep_softirq_start();
538d3759e71SFrederic Weisbecker 	account_softirq_enter(current);
5391da177e4SLinus Torvalds 
5401da177e4SLinus Torvalds restart:
5411da177e4SLinus Torvalds 	/* Reset the pending bitmask before enabling irqs */
5423f74478bSAndi Kleen 	set_softirq_pending(0);
5431da177e4SLinus Torvalds 
544c70f5d66SAndrew Morton 	local_irq_enable();
5451da177e4SLinus Torvalds 
5461da177e4SLinus Torvalds 	h = softirq_vec;
5471da177e4SLinus Torvalds 
5482e702b9fSJoe Perches 	while ((softirq_bit = ffs(pending))) {
5492e702b9fSJoe Perches 		unsigned int vec_nr;
5502e702b9fSJoe Perches 		int prev_count;
5512e702b9fSJoe Perches 
5522e702b9fSJoe Perches 		h += softirq_bit - 1;
5532e702b9fSJoe Perches 
5542e702b9fSJoe Perches 		vec_nr = h - softirq_vec;
5552e702b9fSJoe Perches 		prev_count = preempt_count();
5568e85b4b5SThomas Gleixner 
557f4bc6bb2SThomas Gleixner 		kstat_incr_softirqs_this_cpu(vec_nr);
558f4bc6bb2SThomas Gleixner 
559f4bc6bb2SThomas Gleixner 		trace_softirq_entry(vec_nr);
5601da177e4SLinus Torvalds 		h->action(h);
561f4bc6bb2SThomas Gleixner 		trace_softirq_exit(vec_nr);
5628e85b4b5SThomas Gleixner 		if (unlikely(prev_count != preempt_count())) {
56340322764SJoe Perches 			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
5642e702b9fSJoe Perches 			       vec_nr, softirq_to_name[vec_nr], h->action,
565f4bc6bb2SThomas Gleixner 			       prev_count, preempt_count());
5664a2b4b22SPeter Zijlstra 			preempt_count_set(prev_count);
5678e85b4b5SThomas Gleixner 		}
5681da177e4SLinus Torvalds 		h++;
5692e702b9fSJoe Perches 		pending >>= softirq_bit;
5702e702b9fSJoe Perches 	}
5711da177e4SLinus Torvalds 
5723a83d0d2SZqiang 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
573d28139c4SPaul E. McKenney 		rcu_softirq_qs();
5748b1c04acSThomas Gleixner 
575c70f5d66SAndrew Morton 	local_irq_disable();
5761da177e4SLinus Torvalds 
5771da177e4SLinus Torvalds 	pending = local_softirq_pending();
578c10d7367SEric Dumazet 	if (pending) {
57934376a50SBen Greear 		if (time_before(jiffies, end) && !need_resched() &&
58034376a50SBen Greear 		    --max_restart)
5811da177e4SLinus Torvalds 			goto restart;
5821da177e4SLinus Torvalds 
5831da177e4SLinus Torvalds 		wakeup_softirqd();
584c10d7367SEric Dumazet 	}
5851da177e4SLinus Torvalds 
586d3759e71SFrederic Weisbecker 	account_softirq_exit(current);
5875c4853b6SFrederic Weisbecker 	lockdep_softirq_end(in_hardirq);
588f02fc963SThomas Gleixner 	softirq_handle_end();
589717a94b5SNeilBrown 	current_restore_flags(old_flags, PF_MEMALLOC);
5901da177e4SLinus Torvalds }
5911da177e4SLinus Torvalds 
5923a83d0d2SZqiang asmlinkage __visible void __softirq_entry __do_softirq(void)
5933a83d0d2SZqiang {
5943a83d0d2SZqiang 	handle_softirqs(false);
5953a83d0d2SZqiang }
5963a83d0d2SZqiang 
5978a6bc478SThomas Gleixner /**
5988a6bc478SThomas Gleixner  * irq_enter_rcu - Enter an interrupt context with RCU watching
599dde4b2b5SIngo Molnar  */
6008a6bc478SThomas Gleixner void irq_enter_rcu(void)
601dde4b2b5SIngo Molnar {
602d14ce74fSFrederic Weisbecker 	__irq_enter_raw();
603d14ce74fSFrederic Weisbecker 
60453e87e3cSFrederic Weisbecker 	if (tick_nohz_full_cpu(smp_processor_id()) ||
60553e87e3cSFrederic Weisbecker 	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
6065acac1beSFrederic Weisbecker 		tick_irq_enter();
607d14ce74fSFrederic Weisbecker 
608d14ce74fSFrederic Weisbecker 	account_hardirq_enter(current);
609dde4b2b5SIngo Molnar }
610dde4b2b5SIngo Molnar 
6118a6bc478SThomas Gleixner /**
6128a6bc478SThomas Gleixner  * irq_enter - Enter an interrupt context including RCU update
6138a6bc478SThomas Gleixner  */
6148a6bc478SThomas Gleixner void irq_enter(void)
6158a6bc478SThomas Gleixner {
6166f0e6c15SFrederic Weisbecker 	ct_irq_enter();
6178a6bc478SThomas Gleixner 	irq_enter_rcu();
6188a6bc478SThomas Gleixner }
6198a6bc478SThomas Gleixner 
62067826eaeSFrederic Weisbecker static inline void tick_irq_exit(void)
62167826eaeSFrederic Weisbecker {
62267826eaeSFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
62367826eaeSFrederic Weisbecker 	int cpu = smp_processor_id();
62467826eaeSFrederic Weisbecker 
62567826eaeSFrederic Weisbecker 	/* Make sure that timer wheel updates are propagated */
626548796e2SCruz Zhao 	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
627fe13889cSChangbin Du 		if (!in_hardirq())
62867826eaeSFrederic Weisbecker 			tick_nohz_irq_exit();
62967826eaeSFrederic Weisbecker 	}
63067826eaeSFrederic Weisbecker #endif
63167826eaeSFrederic Weisbecker }
63267826eaeSFrederic Weisbecker 
63359bc300bSPeter Zijlstra static inline void __irq_exit_rcu(void)
6341da177e4SLinus Torvalds {
63574eed016SThomas Gleixner #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
6364cd5d111SFrederic Weisbecker 	local_irq_disable();
63774eed016SThomas Gleixner #else
638f71b74bcSFrederic Weisbecker 	lockdep_assert_irqs_disabled();
63974eed016SThomas Gleixner #endif
640d3759e71SFrederic Weisbecker 	account_hardirq_exit(current);
641bdb43806SPeter Zijlstra 	preempt_count_sub(HARDIRQ_OFFSET);
6421da177e4SLinus Torvalds 	if (!in_interrupt() && local_softirq_pending())
6431da177e4SLinus Torvalds 		invoke_softirq();
64479bf2bb3SThomas Gleixner 
64567826eaeSFrederic Weisbecker 	tick_irq_exit();
6468a6bc478SThomas Gleixner }
6478a6bc478SThomas Gleixner 
6488a6bc478SThomas Gleixner /**
64959bc300bSPeter Zijlstra  * irq_exit_rcu() - Exit an interrupt context without updating RCU
65059bc300bSPeter Zijlstra  *
65159bc300bSPeter Zijlstra  * Also processes softirqs if needed and possible.
65259bc300bSPeter Zijlstra  */
65359bc300bSPeter Zijlstra void irq_exit_rcu(void)
65459bc300bSPeter Zijlstra {
65559bc300bSPeter Zijlstra 	__irq_exit_rcu();
65659bc300bSPeter Zijlstra 	 /* must be last! */
65759bc300bSPeter Zijlstra 	lockdep_hardirq_exit();
65859bc300bSPeter Zijlstra }
65959bc300bSPeter Zijlstra 
66059bc300bSPeter Zijlstra /**
6618a6bc478SThomas Gleixner  * irq_exit - Exit an interrupt context, update RCU and lockdep
6628a6bc478SThomas Gleixner  *
6638a6bc478SThomas Gleixner  * Also processes softirqs if needed and possible.
6648a6bc478SThomas Gleixner  */
6658a6bc478SThomas Gleixner void irq_exit(void)
6668a6bc478SThomas Gleixner {
66759bc300bSPeter Zijlstra 	__irq_exit_rcu();
6686f0e6c15SFrederic Weisbecker 	ct_irq_exit();
6692502ec37SThomas Gleixner 	 /* must be last! */
6702502ec37SThomas Gleixner 	lockdep_hardirq_exit();
6711da177e4SLinus Torvalds }
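/*
 * Editor's sketch (hypothetical architecture code): a typical low level
 * interrupt entry brackets the handler with irq_enter()/irq_exit(), which
 * is what ends up calling invoke_softirq() on the way out:
 *
 *	irq_enter();
 *	generic_handle_domain_irq(domain, hwirq);	// dispatch the hard irq
 *	irq_exit();					// may run softirqs
 *
 * Code that manages RCU/context tracking itself uses irq_enter_rcu() and
 * irq_exit_rcu() instead, as documented above.
 */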
6721da177e4SLinus Torvalds 
6731da177e4SLinus Torvalds /*
6741da177e4SLinus Torvalds  * This function must run with irqs disabled!
6751da177e4SLinus Torvalds  */
6767ad5b3a5SHarvey Harrison inline void raise_softirq_irqoff(unsigned int nr)
6771da177e4SLinus Torvalds {
6781da177e4SLinus Torvalds 	__raise_softirq_irqoff(nr);
6791da177e4SLinus Torvalds 
6801da177e4SLinus Torvalds 	/*
6811da177e4SLinus Torvalds 	 * If we're in an interrupt or softirq, we're done
6821da177e4SLinus Torvalds 	 * (this also catches softirq-disabled code). We will
6831da177e4SLinus Torvalds 	 * actually run the softirq once we return from
6841da177e4SLinus Torvalds 	 * the irq or softirq.
6851da177e4SLinus Torvalds 	 *
6861da177e4SLinus Torvalds 	 * Otherwise we wake up ksoftirqd to make sure we
6871da177e4SLinus Torvalds 	 * schedule the softirq soon.
6881da177e4SLinus Torvalds 	 */
689f02fc963SThomas Gleixner 	if (!in_interrupt() && should_wake_ksoftirqd())
6901da177e4SLinus Torvalds 		wakeup_softirqd();
6911da177e4SLinus Torvalds }
6921da177e4SLinus Torvalds 
6937ad5b3a5SHarvey Harrison void raise_softirq(unsigned int nr)
6941da177e4SLinus Torvalds {
6951da177e4SLinus Torvalds 	unsigned long flags;
6961da177e4SLinus Torvalds 
6971da177e4SLinus Torvalds 	local_irq_save(flags);
6981da177e4SLinus Torvalds 	raise_softirq_irqoff(nr);
6991da177e4SLinus Torvalds 	local_irq_restore(flags);
7001da177e4SLinus Torvalds }
7011da177e4SLinus Torvalds 
702f069686eSSteven Rostedt void __raise_softirq_irqoff(unsigned int nr)
703f069686eSSteven Rostedt {
704cdabce2eSJiafei Pan 	lockdep_assert_irqs_disabled();
705f069686eSSteven Rostedt 	trace_softirq_raise(nr);
706f069686eSSteven Rostedt 	or_softirq_pending(1UL << nr);
707f069686eSSteven Rostedt }
708f069686eSSteven Rostedt 
709962cf36cSCarlos R. Mafra void open_softirq(int nr, void (*action)(struct softirq_action *))
7101da177e4SLinus Torvalds {
7111da177e4SLinus Torvalds 	softirq_vec[nr].action = action;
7121da177e4SLinus Torvalds }
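/*
 * Editor's example of the registration pattern (illustrative, not part of
 * this file): a softirq vector is wired up once at boot and raised later,
 * typically from hard interrupt context:
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);	// as in softirq_init()
 *	...
 *	raise_softirq_irqoff(TASKLET_SOFTIRQ);	// caller has interrupts off
 *	raise_softirq(TASKLET_SOFTIRQ);		// saves/restores irqs itself
 *
 * New softirq vectors are not added lightly; most code should use tasklets
 * or timers, which are built on top of existing vectors, or threaded
 * interrupts.
 */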
7131da177e4SLinus Torvalds 
7149ba5f005SPeter Zijlstra /*
7159ba5f005SPeter Zijlstra  * Tasklets
7169ba5f005SPeter Zijlstra  */
717ce85b4f2SJoe Perches struct tasklet_head {
71848f20a9aSOlof Johansson 	struct tasklet_struct *head;
71948f20a9aSOlof Johansson 	struct tasklet_struct **tail;
7201da177e4SLinus Torvalds };
7211da177e4SLinus Torvalds 
7224620b49fSVegard Nossum static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
7234620b49fSVegard Nossum static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
7241da177e4SLinus Torvalds 
7256498ddadSIngo Molnar static void __tasklet_schedule_common(struct tasklet_struct *t,
7266498ddadSIngo Molnar 				      struct tasklet_head __percpu *headp,
7276498ddadSIngo Molnar 				      unsigned int softirq_nr)
7281da177e4SLinus Torvalds {
7296498ddadSIngo Molnar 	struct tasklet_head *head;
7301da177e4SLinus Torvalds 	unsigned long flags;
7311da177e4SLinus Torvalds 
7321da177e4SLinus Torvalds 	local_irq_save(flags);
7336498ddadSIngo Molnar 	head = this_cpu_ptr(headp);
73448f20a9aSOlof Johansson 	t->next = NULL;
7356498ddadSIngo Molnar 	*head->tail = t;
7366498ddadSIngo Molnar 	head->tail = &(t->next);
7376498ddadSIngo Molnar 	raise_softirq_irqoff(softirq_nr);
7381da177e4SLinus Torvalds 	local_irq_restore(flags);
7391da177e4SLinus Torvalds }
7406498ddadSIngo Molnar 
7416498ddadSIngo Molnar void __tasklet_schedule(struct tasklet_struct *t)
7426498ddadSIngo Molnar {
7436498ddadSIngo Molnar 	__tasklet_schedule_common(t, &tasklet_vec,
7446498ddadSIngo Molnar 				  TASKLET_SOFTIRQ);
7456498ddadSIngo Molnar }
7461da177e4SLinus Torvalds EXPORT_SYMBOL(__tasklet_schedule);
7471da177e4SLinus Torvalds 
7487ad5b3a5SHarvey Harrison void __tasklet_hi_schedule(struct tasklet_struct *t)
7491da177e4SLinus Torvalds {
7506498ddadSIngo Molnar 	__tasklet_schedule_common(t, &tasklet_hi_vec,
7516498ddadSIngo Molnar 				  HI_SOFTIRQ);
7521da177e4SLinus Torvalds }
7531da177e4SLinus Torvalds EXPORT_SYMBOL(__tasklet_hi_schedule);
7541da177e4SLinus Torvalds 
755697d8c63SPeter Zijlstra static bool tasklet_clear_sched(struct tasklet_struct *t)
7566b2c339dSDirk Behme {
757697d8c63SPeter Zijlstra 	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
758697d8c63SPeter Zijlstra 		wake_up_var(&t->state);
7596b2c339dSDirk Behme 		return true;
760697d8c63SPeter Zijlstra 	}
7616b2c339dSDirk Behme 
7626b2c339dSDirk Behme 	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
7636b2c339dSDirk Behme 		  t->use_callback ? "callback" : "func",
7646b2c339dSDirk Behme 		  t->use_callback ? (void *)t->callback : (void *)t->func);
7656b2c339dSDirk Behme 
7666b2c339dSDirk Behme 	return false;
7676b2c339dSDirk Behme }
7686b2c339dSDirk Behme 
76982b691beSIngo Molnar static void tasklet_action_common(struct softirq_action *a,
77082b691beSIngo Molnar 				  struct tasklet_head *tl_head,
77182b691beSIngo Molnar 				  unsigned int softirq_nr)
7721da177e4SLinus Torvalds {
7731da177e4SLinus Torvalds 	struct tasklet_struct *list;
7741da177e4SLinus Torvalds 
7751da177e4SLinus Torvalds 	local_irq_disable();
77682b691beSIngo Molnar 	list = tl_head->head;
77782b691beSIngo Molnar 	tl_head->head = NULL;
77882b691beSIngo Molnar 	tl_head->tail = &tl_head->head;
7791da177e4SLinus Torvalds 	local_irq_enable();
7801da177e4SLinus Torvalds 
7811da177e4SLinus Torvalds 	while (list) {
7821da177e4SLinus Torvalds 		struct tasklet_struct *t = list;
7831da177e4SLinus Torvalds 
7841da177e4SLinus Torvalds 		list = list->next;
7851da177e4SLinus Torvalds 
7861da177e4SLinus Torvalds 		if (tasklet_trylock(t)) {
7871da177e4SLinus Torvalds 			if (!atomic_read(&t->count)) {
788697d8c63SPeter Zijlstra 				if (tasklet_clear_sched(t)) {
789f4bf3ca2SLingutla Chandrasekhar 					if (t->use_callback) {
790f4bf3ca2SLingutla Chandrasekhar 						trace_tasklet_entry(t, t->callback);
79112cc923fSRomain Perier 						t->callback(t);
792f4bf3ca2SLingutla Chandrasekhar 						trace_tasklet_exit(t, t->callback);
793f4bf3ca2SLingutla Chandrasekhar 					} else {
794f4bf3ca2SLingutla Chandrasekhar 						trace_tasklet_entry(t, t->func);
7951da177e4SLinus Torvalds 						t->func(t->data);
796f4bf3ca2SLingutla Chandrasekhar 						trace_tasklet_exit(t, t->func);
797f4bf3ca2SLingutla Chandrasekhar 					}
7986b2c339dSDirk Behme 				}
7991da177e4SLinus Torvalds 				tasklet_unlock(t);
8001da177e4SLinus Torvalds 				continue;
8011da177e4SLinus Torvalds 			}
8021da177e4SLinus Torvalds 			tasklet_unlock(t);
8031da177e4SLinus Torvalds 		}
8041da177e4SLinus Torvalds 
8051da177e4SLinus Torvalds 		local_irq_disable();
80648f20a9aSOlof Johansson 		t->next = NULL;
80782b691beSIngo Molnar 		*tl_head->tail = t;
80882b691beSIngo Molnar 		tl_head->tail = &t->next;
80982b691beSIngo Molnar 		__raise_softirq_irqoff(softirq_nr);
8101da177e4SLinus Torvalds 		local_irq_enable();
8111da177e4SLinus Torvalds 	}
8121da177e4SLinus Torvalds }
8131da177e4SLinus Torvalds 
81482b691beSIngo Molnar static __latent_entropy void tasklet_action(struct softirq_action *a)
81582b691beSIngo Molnar {
81682b691beSIngo Molnar 	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
81782b691beSIngo Molnar }
81882b691beSIngo Molnar 
8190766f788SEmese Revfy static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
8201da177e4SLinus Torvalds {
82182b691beSIngo Molnar 	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
8221da177e4SLinus Torvalds }
8231da177e4SLinus Torvalds 
82412cc923fSRomain Perier void tasklet_setup(struct tasklet_struct *t,
82512cc923fSRomain Perier 		   void (*callback)(struct tasklet_struct *))
82612cc923fSRomain Perier {
82712cc923fSRomain Perier 	t->next = NULL;
82812cc923fSRomain Perier 	t->state = 0;
82912cc923fSRomain Perier 	atomic_set(&t->count, 0);
83012cc923fSRomain Perier 	t->callback = callback;
83112cc923fSRomain Perier 	t->use_callback = true;
83212cc923fSRomain Perier 	t->data = 0;
83312cc923fSRomain Perier }
83412cc923fSRomain Perier EXPORT_SYMBOL(tasklet_setup);
83512cc923fSRomain Perier 
8361da177e4SLinus Torvalds void tasklet_init(struct tasklet_struct *t,
8371da177e4SLinus Torvalds 		  void (*func)(unsigned long), unsigned long data)
8381da177e4SLinus Torvalds {
8391da177e4SLinus Torvalds 	t->next = NULL;
8401da177e4SLinus Torvalds 	t->state = 0;
8411da177e4SLinus Torvalds 	atomic_set(&t->count, 0);
8421da177e4SLinus Torvalds 	t->func = func;
84312cc923fSRomain Perier 	t->use_callback = false;
8441da177e4SLinus Torvalds 	t->data = data;
8451da177e4SLinus Torvalds }
8461da177e4SLinus Torvalds EXPORT_SYMBOL(tasklet_init);
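/*
 * Editor's example of the callback based API (hypothetical driver code;
 * all identifiers below are made up):
 *
 *	struct my_dev {
 *		struct tasklet_struct rx_tasklet;
 *	};
 *
 *	static void my_rx_tasklet(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, rx_tasklet);
 *		// runs in softirq context, serialized against itself
 *	}
 *
 *	tasklet_setup(&dev->rx_tasklet, my_rx_tasklet);	// e.g. in probe()
 *	tasklet_schedule(&dev->rx_tasklet);		// e.g. from the hard irq handler
 */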
8471da177e4SLinus Torvalds 
848eb2dafbbSThomas Gleixner #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
849eb2dafbbSThomas Gleixner /*
850eb2dafbbSThomas Gleixner  * Do not use in new code. Waiting for tasklets from atomic contexts is
851eb2dafbbSThomas Gleixner  * error prone and should be avoided.
852eb2dafbbSThomas Gleixner  */
853eb2dafbbSThomas Gleixner void tasklet_unlock_spin_wait(struct tasklet_struct *t)
854eb2dafbbSThomas Gleixner {
855eb2dafbbSThomas Gleixner 	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
856eb2dafbbSThomas Gleixner 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
857eb2dafbbSThomas Gleixner 			/*
858eb2dafbbSThomas Gleixner 			 * Prevent a live lock if the current task preempted
859eb2dafbbSThomas Gleixner 			 * soft interrupt processing or is preventing ksoftirqd
860eb2dafbbSThomas Gleixner 			 * from running. If the tasklet runs on a different CPU
861eb2dafbbSThomas Gleixner 			 * then this has no effect other than doing the BH
862eb2dafbbSThomas Gleixner 			 * disable/enable dance for nothing.
863eb2dafbbSThomas Gleixner 			 */
864eb2dafbbSThomas Gleixner 			local_bh_disable();
865eb2dafbbSThomas Gleixner 			local_bh_enable();
866eb2dafbbSThomas Gleixner 		} else {
867eb2dafbbSThomas Gleixner 			cpu_relax();
868eb2dafbbSThomas Gleixner 		}
869eb2dafbbSThomas Gleixner 	}
870eb2dafbbSThomas Gleixner }
871eb2dafbbSThomas Gleixner EXPORT_SYMBOL(tasklet_unlock_spin_wait);
872eb2dafbbSThomas Gleixner #endif
873eb2dafbbSThomas Gleixner 
8741da177e4SLinus Torvalds void tasklet_kill(struct tasklet_struct *t)
8751da177e4SLinus Torvalds {
8761da177e4SLinus Torvalds 	if (in_interrupt())
87740322764SJoe Perches 		pr_notice("Attempt to kill tasklet from interrupt\n");
8781da177e4SLinus Torvalds 
879697d8c63SPeter Zijlstra 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
880697d8c63SPeter Zijlstra 		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
881697d8c63SPeter Zijlstra 
8821da177e4SLinus Torvalds 	tasklet_unlock_wait(t);
883697d8c63SPeter Zijlstra 	tasklet_clear_sched(t);
8841da177e4SLinus Torvalds }
8851da177e4SLinus Torvalds EXPORT_SYMBOL(tasklet_kill);
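/*
 * Editor's sketch of the usual teardown order (hypothetical driver code):
 * first make sure nothing can schedule the tasklet anymore, then wait for a
 * pending or running instance to finish before freeing its storage:
 *
 *	free_irq(dev->irq, dev);		// no more tasklet_schedule() calls
 *	tasklet_kill(&dev->rx_tasklet);		// wait until the tasklet is idle
 *	kfree(dev);
 */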
8861da177e4SLinus Torvalds 
887eb2dafbbSThomas Gleixner #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
888da044747SPeter Zijlstra void tasklet_unlock(struct tasklet_struct *t)
889da044747SPeter Zijlstra {
890da044747SPeter Zijlstra 	smp_mb__before_atomic();
891da044747SPeter Zijlstra 	clear_bit(TASKLET_STATE_RUN, &t->state);
892da044747SPeter Zijlstra 	smp_mb__after_atomic();
893da044747SPeter Zijlstra 	wake_up_var(&t->state);
894da044747SPeter Zijlstra }
895da044747SPeter Zijlstra EXPORT_SYMBOL_GPL(tasklet_unlock);
896da044747SPeter Zijlstra 
897da044747SPeter Zijlstra void tasklet_unlock_wait(struct tasklet_struct *t)
898da044747SPeter Zijlstra {
899da044747SPeter Zijlstra 	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
900da044747SPeter Zijlstra }
901da044747SPeter Zijlstra EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
902da044747SPeter Zijlstra #endif
903da044747SPeter Zijlstra 
9041da177e4SLinus Torvalds void __init softirq_init(void)
9051da177e4SLinus Torvalds {
90648f20a9aSOlof Johansson 	int cpu;
90748f20a9aSOlof Johansson 
90848f20a9aSOlof Johansson 	for_each_possible_cpu(cpu) {
90948f20a9aSOlof Johansson 		per_cpu(tasklet_vec, cpu).tail =
91048f20a9aSOlof Johansson 			&per_cpu(tasklet_vec, cpu).head;
91148f20a9aSOlof Johansson 		per_cpu(tasklet_hi_vec, cpu).tail =
91248f20a9aSOlof Johansson 			&per_cpu(tasklet_hi_vec, cpu).head;
91348f20a9aSOlof Johansson 	}
91448f20a9aSOlof Johansson 
915962cf36cSCarlos R. Mafra 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
916962cf36cSCarlos R. Mafra 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
9171da177e4SLinus Torvalds }
9181da177e4SLinus Torvalds 
9193e339b5dSThomas Gleixner static int ksoftirqd_should_run(unsigned int cpu)
9201da177e4SLinus Torvalds {
9213e339b5dSThomas Gleixner 	return local_softirq_pending();
9221da177e4SLinus Torvalds }
9231da177e4SLinus Torvalds 
9243e339b5dSThomas Gleixner static void run_ksoftirqd(unsigned int cpu)
9253e339b5dSThomas Gleixner {
926f02fc963SThomas Gleixner 	ksoftirqd_run_begin();
9273e339b5dSThomas Gleixner 	if (local_softirq_pending()) {
9280bed698aSFrederic Weisbecker 		/*
9290bed698aSFrederic Weisbecker 		 * We can safely run softirqs on the inline stack, as we are not deep
9300bed698aSFrederic Weisbecker 		 * in the task stack here.
9310bed698aSFrederic Weisbecker 		 */
9323a83d0d2SZqiang 		handle_softirqs(true);
933f02fc963SThomas Gleixner 		ksoftirqd_run_end();
934edf22f4cSPaul E. McKenney 		cond_resched();
9353e339b5dSThomas Gleixner 		return;
9361da177e4SLinus Torvalds 	}
937f02fc963SThomas Gleixner 	ksoftirqd_run_end();
9381da177e4SLinus Torvalds }
9391da177e4SLinus Torvalds 
9401da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
941c4544dbcSSebastian Andrzej Siewior static int takeover_tasklets(unsigned int cpu)
9421da177e4SLinus Torvalds {
9431da177e4SLinus Torvalds 	/* CPU is dead, so no lock needed. */
9441da177e4SLinus Torvalds 	local_irq_disable();
9451da177e4SLinus Torvalds 
9461da177e4SLinus Torvalds 	/* Find end, append list for that CPU. */
947e5e41723SChristian Borntraeger 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
948909ea964SChristoph Lameter 		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
9498afecaa6SMuchun Song 		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
95048f20a9aSOlof Johansson 		per_cpu(tasklet_vec, cpu).head = NULL;
95148f20a9aSOlof Johansson 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
952e5e41723SChristian Borntraeger 	}
9531da177e4SLinus Torvalds 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
9541da177e4SLinus Torvalds 
955e5e41723SChristian Borntraeger 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
956909ea964SChristoph Lameter 		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
957909ea964SChristoph Lameter 		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
95848f20a9aSOlof Johansson 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
95948f20a9aSOlof Johansson 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
960e5e41723SChristian Borntraeger 	}
9611da177e4SLinus Torvalds 	raise_softirq_irqoff(HI_SOFTIRQ);
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds 	local_irq_enable();
964c4544dbcSSebastian Andrzej Siewior 	return 0;
9651da177e4SLinus Torvalds }
966c4544dbcSSebastian Andrzej Siewior #else
967c4544dbcSSebastian Andrzej Siewior #define takeover_tasklets	NULL
9681da177e4SLinus Torvalds #endif /* CONFIG_HOTPLUG_CPU */
9691da177e4SLinus Torvalds 
9703e339b5dSThomas Gleixner static struct smp_hotplug_thread softirq_threads = {
9713e339b5dSThomas Gleixner 	.store			= &ksoftirqd,
9723e339b5dSThomas Gleixner 	.thread_should_run	= ksoftirqd_should_run,
9733e339b5dSThomas Gleixner 	.thread_fn		= run_ksoftirqd,
9743e339b5dSThomas Gleixner 	.thread_comm		= "ksoftirqd/%u",
9753e339b5dSThomas Gleixner };
9763e339b5dSThomas Gleixner 
9777babe8dbSEduard - Gabriel Munteanu static __init int spawn_ksoftirqd(void)
9781da177e4SLinus Torvalds {
979c4544dbcSSebastian Andrzej Siewior 	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
980c4544dbcSSebastian Andrzej Siewior 				  takeover_tasklets);
9813e339b5dSThomas Gleixner 	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
9823e339b5dSThomas Gleixner 
9831da177e4SLinus Torvalds 	return 0;
9841da177e4SLinus Torvalds }
9857babe8dbSEduard - Gabriel Munteanu early_initcall(spawn_ksoftirqd);
98678eef01bSAndrew Morton 
98743a25632SYinghai Lu /*
98843a25632SYinghai Lu  * [ These __weak aliases are kept in a separate compilation unit, so that
98943a25632SYinghai Lu  *   GCC does not inline them incorrectly. ]
99043a25632SYinghai Lu  */
99143a25632SYinghai Lu 
99243a25632SYinghai Lu int __init __weak early_irq_init(void)
99343a25632SYinghai Lu {
99443a25632SYinghai Lu 	return 0;
99543a25632SYinghai Lu }
99643a25632SYinghai Lu 
9974a046d17SYinghai Lu int __init __weak arch_probe_nr_irqs(void)
9984a046d17SYinghai Lu {
999b683de2bSThomas Gleixner 	return NR_IRQS_LEGACY;
10004a046d17SYinghai Lu }
10014a046d17SYinghai Lu 
100243a25632SYinghai Lu int __init __weak arch_early_irq_init(void)
100343a25632SYinghai Lu {
100443a25632SYinghai Lu 	return 0;
100543a25632SYinghai Lu }
100662a08ae2SThomas Gleixner 
100762a08ae2SThomas Gleixner unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
100862a08ae2SThomas Gleixner {
100962a08ae2SThomas Gleixner 	return from;
101062a08ae2SThomas Gleixner }
1011