// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables; all data are CPU-local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution, so we get a sort of weak CPU binding. It is still not
     clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
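
/*
 * These strings are user-visible: fs/proc/softirqs.c prints one row per
 * entry of this table ("HI:", "TIMER:", ...) with a column of counts
 * per CPU, so the order here must stay in sync with the softirq enum
 * in <linux/interrupt.h>.
 */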

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
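
/*
 * A minimal sketch of how the two states can be told apart (not used in
 * this file; shown for illustration only):
 *
 *	if (in_serving_softirq())
 *		;	// SOFTIRQ_OFFSET set: currently processing softirq
 *	else if (in_softirq())
 *		;	// only DISABLE offsets: bh disabled, not processing
 *
 * in_serving_softirq() tests just the SOFTIRQ_OFFSET bit, while
 * in_softirq() tests the whole SOFTIRQ_MASK and is therefore also true
 * under a plain local_bh_disable().
 */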

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirqs if any are pending, and do it on their own
		 * stack, as we may already be deep in a task's call stack.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
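
/*
 * Typical usage, as a sketch (the work function below is hypothetical):
 * bh sections protect per-CPU data against softirq handlers on this CPU.
 *
 *	local_bh_disable();
 *	update_my_percpu_state();	// cannot race with e.g. NET_RX here
 *	local_bh_enable();		// may run pending softirqs right away
 *
 * Since local_bh_enable() can end up in do_softirq(), it must be called
 * with hardirqs enabled, which is what the lockdep assertion above
 * enforces.
 */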

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
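
/*
 * For example, at HZ=1000 the 2ms cap is two jiffies; at HZ=100,
 * msecs_to_jiffies(2) rounds up to a single 10ms jiffy, so on low-HZ
 * configurations the time budget is coarser and the restart counter
 * does more of the limiting.
 */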

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context(current)) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}
	__irq_enter();
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can be potentially deep already. So run the softirq on its
		 * own stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}
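
/*
 * Sketch of the expected call pattern (illustrative; the dispatch step
 * is architecture-specific in practice): low level interrupt entry code
 * brackets the handler like
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// arch-specific dispatch
 *	irq_exit();			// may run softirqs on the way out
 *
 * so softirqs raised by the handler get serviced before the interrupted
 * context resumes.
 */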

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
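
/*
 * Registration sketch (illustrative only; my_softirq_action is a
 * hypothetical name): a subsystem registers its handler once at init
 * and raises the vector later, typically from hardirq context.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// runs in softirq context, hardirqs enabled
 *	}
 *
 *	open_softirq(NET_TX_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);
 *
 * There is no close_softirq(): the vectors are fixed at compile time by
 * NR_SOFTIRQS and handlers are never unregistered.
 */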

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
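
/*
 * Usage sketch (names are hypothetical): the classic tasklet life cycle
 * is init once, schedule from hardirq context, kill before freeing.
 *
 *	static void my_func(unsigned long data)
 *	{
 *		// bottom-half work, runs in softirq context
 *	}
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_func, 0);
 *	tasklet_schedule(&my_tasklet);	// e.g. from an irq handler
 *	tasklet_kill(&my_tasklet);	// on teardown, never from irq context
 *
 * DECLARE_TASKLET() can replace the runtime tasklet_init() for
 * statically allocated tasklets.
 */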

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack, as we
		 * are not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already
 * be scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}