// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local CPU is marked for
     execution. Hence, we get something of a weak CPU binding,
     though it is still not clear whether it results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
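
/*
 * Sketch (illustration only, not compiled): how a built-in softirq user is
 * wired up with open_softirq() and raise_softirq(), both defined later in
 * this file. The vector and handler names below are hypothetical; the set
 * of softirq vectors is the fixed NR_SOFTIRQS enum, so new code normally
 * uses tasklets or threaded interrupts instead of adding a vector.
 */
#if 0
static void example_action(struct softirq_action *h)
{
	/* Runs in softirq context on the CPU that raised the vector. */
}

static int __init example_subsys_init(void)
{
	open_softirq(EXAMPLE_SOFTIRQ, example_action);	/* hypothetical vector */
	return 0;
}

static void example_event(void)
{
	/* Mark the vector pending; irq_exit() or ksoftirqd will run it. */
	raise_softirq(EXAMPLE_SOFTIRQ);
}
#endif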

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
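
/*
 * Sketch (illustration only, not compiled) of what the two offsets mean for
 * the queries built on softirq_count():
 */
#if 0
static void softirq_count_example(void)
{
	local_bh_disable();		/* count += SOFTIRQ_DISABLE_OFFSET */
	WARN_ON(!in_softirq());		/* BH disabled section is accounted */
	WARN_ON(in_serving_softirq());	/* ...but no softirq is being served */
	local_bh_enable();		/* count -= SOFTIRQ_DISABLE_OFFSET */
}
#endif
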
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check from the idle task whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() & al work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * Invoked from run_ksoftirqd() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can be quite deep already. So run the softirq on its own
		 * stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

static void handle_softirqs(bool ksirqd)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	handle_softirqs(false);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
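
/*
 * Sketch (illustration only, not compiled): typical driver-side use of the
 * callback based API initialized by tasklet_setup(). The device structure,
 * the interrupt handler and the function names below are hypothetical.
 */
#if 0
struct example_dev {
	struct tasklet_struct tasklet;
	/* ... device state ... */
};

static void example_tasklet_fn(struct tasklet_struct *t)
{
	struct example_dev *dev = from_tasklet(dev, t, tasklet);

	/* Deferred work: runs in softirq context, serialized wrt itself. */
}

static void example_dev_init(struct example_dev *dev)
{
	tasklet_setup(&dev->tasklet, example_tasklet_fn);
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_dev *dev = data;

	/* Defer the heavy lifting out of hardirq context. */
	tasklet_schedule(&dev->tasklet);
	return IRQ_HANDLED;
}
#endif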

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock if the current task has
			 * preempted soft interrupt processing or prevents
			 * ksoftirqd from running. If the tasklet runs on a
			 * different CPU then this has no effect other than
			 * doing the BH disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack here, as
		 * we are not deep in the task stack.
		 */
		handle_softirqs(true);
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}