// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence, we get something of a weak CPU binding, though
     it is still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
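
/*
 * For instance, a handler that does need cross-CPU serialization must take
 * its own lock; nothing here provides it. A hedged sketch with hypothetical
 * names, not a real handler in this file:
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *	static LIST_HEAD(example_list);
 *
 *	static void example_action(struct softirq_action *h)
 *	{
 *		// The same vector may run concurrently on other CPUs,
 *		// so shared state still needs an explicit lock.
 *		spin_lock(&example_lock);
 *		// ... consume example_list ...
 *		spin_unlock(&example_lock);
 *	}
 */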

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * a softirq and whether we just have bh disabled.
 */
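
/*
 * A worked example (values taken from the generic <linux/preempt.h> layout,
 * where SOFTIRQ_OFFSET is 0x100 and SOFTIRQ_DISABLE_OFFSET is 0x200):
 *
 *	local_bh_disable();	// softirq_count() == 0x200: bh disabled
 *	...
 *	// while __do_softirq() runs a handler:
 *	//	softirq_count() == 0x100: serving a softirq, so
 *	//	in_serving_softirq() is true; plain bh-disabled code is not
 *	...
 *	local_bh_enable();	// softirq_count() back to 0
 */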

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special case: softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirqs if any are pending. And do it in their own
		 * stack, as we may be calling this deep in a task call
		 * stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
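
/*
 * Typical pairing as seen from the rest of the kernel (a hedged sketch;
 * the per-CPU counter is hypothetical, purely for illustration):
 *
 *	local_bh_disable();		// ends up in __local_bh_disable_ip()
 *	__this_cpu_inc(example_stat);	// safe from local softirq handlers
 *	local_bh_enable();		// ends up here; may run pending softirqs
 *
 * Note that local_bh_enable() may call do_softirq() directly, so it must
 * not be used from hardirq context (hence the WARN_ON_ONCE(in_irq())).
 */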

/*
 * We restart softirq processing at most MAX_SOFTIRQ_RESTART times,
 * but break out of the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness:
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
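
/*
 * For a feel of the numbers (assuming HZ=1000, a common config):
 * msecs_to_jiffies(2) is 2 jiffies, so __do_softirq() keeps looping for
 * roughly 2 ms, or until it has restarted 10 times or need_resched()
 * became true, whichever comes first.
 */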

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack, we
 * need to keep the lockdep irq context tracking as tight as possible in
 * order not to mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	/* Walk the pending bits, lowest vector first; ffs() is 1-based. */
	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq() from needlessly waking up ksoftirqd
		 * here, as softirqs will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}
	__irq_enter();
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can potentially be deep already. So run the softirq in its
		 * own stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	 /* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	 /* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
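
/*
 * A hedged registration sketch (EXAMPLE_SOFTIRQ and example_action are
 * hypothetical; the real users are boot-time calls such as the ones in
 * softirq_init() below):
 *
 *	static void example_action(struct softirq_action *h)
 *	{
 *		// drain this CPU's pending work ...
 *	}
 *
 *	// once, at init time -- there is no locking on softirq_vec:
 *	open_softirq(EXAMPLE_SOFTIRQ, example_action);
 *
 *	// later, typically from irq context:
 *	raise_softirq(EXAMPLE_SOFTIRQ);
 */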

/*
 * Tasklets
 */
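/*
 * Per-CPU, singly-linked list with a tail pointer, so that appending in
 * __tasklet_schedule_common() is O(1) and preserves FIFO order.
 */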
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	/* Atomically detach this CPU's tasklet list, then run it with irqs on. */
	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback)
					t->callback(t);
				else
					t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

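		/*
		 * The tasklet is already running elsewhere or is disabled:
		 * put it back on the queue and re-raise the softirq to retry.
		 */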
		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
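
/*
 * A minimal driver-side sketch of the callback API (the names are
 * hypothetical; tasklet_schedule() is the inline wrapper from
 * <linux/interrupt.h> around __tasklet_schedule() above):
 *
 *	static void example_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		// bottom-half work; runs in softirq context on the
 *		// CPU that scheduled it
 *	}
 *
 *	static struct tasklet_struct example_tasklet;
 *
 *	tasklet_setup(&example_tasklet, example_tasklet_fn);
 *	tasklet_schedule(&example_tasklet);	// e.g. from a hardirq handler
 */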

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	/*
	 * If the tasklet is pending, wait for it to be picked up, then set
	 * TASKLET_STATE_SCHED ourselves to block further scheduling.
	 */
	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
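
/*
 * Teardown ordering note (a hedged sketch, not a rule enforced here):
 * because tasklet_kill() may yield(), drivers call it from process context,
 * after making sure nothing can re-schedule the tasklet, e.g.:
 *
 *	free_irq(irq, dev);			// stop new schedules
 *	tasklet_kill(&example_tasklet);		// drain pending/running work
 */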

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}