/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
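
/*
 * Illustrative sketch (hypothetical values, not used by the code below):
 * assuming the usual SOFTIRQ_OFFSET of 0x100, a task that has called
 * local_bh_disable() once sees softirq_count() == 0x200, while a task
 * that is actually running a softirq handler (SOFTIRQ_OFFSET added by
 * __do_softirq()) has the 0x100 bit set:
 *
 *	local_bh_disable();	// softirq_count() == 0x200, bh disabled
 *	...			// not serving a softirq
 *	local_bh_enable();	// softirq_count() == 0 again
 *
 * in_serving_softirq() tests the SOFTIRQ_OFFSET bit, so it is true only
 * while a handler runs; in_softirq() tests the whole SOFTIRQ_MASK and is
 * true in both cases.
 */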

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
        preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
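
/*
 * Illustrative sketch (hypothetical usage, not part of this file's logic):
 * callers normally reach the functions above through the
 * local_bh_disable()/local_bh_enable() wrappers in <linux/bottom_half.h>,
 * typically to protect per-CPU data that is also touched from softirq
 * context:
 *
 *	local_bh_disable();
 *	...update data shared with a softirq handler on this CPU...
 *	local_bh_enable();	// may run pending softirqs right here
 *
 * What is being protected is up to the caller; the point is only that
 * local_bh_enable() can end up in do_softirq() via __local_bh_enable_ip().
 */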

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (trace_hardirq_context(current)) {
                in_hardirq = true;
                trace_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC, as the current task context is borrowed for
         * the softirq. A softirq handler, such as network RX, might set
         * PF_MEMALLOC again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        rcu_bh_qs();
        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending && !ksoftirqd_running())
                do_softirq_own_stack();

        local_irq_restore(flags);
}
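
/*
 * Worked example for the ffs() loop in __do_softirq() above (illustrative
 * only): with pending == 0x9 (HI_SOFTIRQ and NET_RX_SOFTIRQ raised),
 * ffs() first returns 1, so vector 0 (HI) runs and pending is shifted
 * down to 0x4; ffs() then returns 3, h advances past the two clear bits
 * to vector 3 (NET_RX), which runs; the shifted mask becomes 0 and the
 * loop ends. Each iteration thus skips a whole run of clear bits in one
 * step instead of testing every vector.
 */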

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_irq_enter();
                _local_bh_enable();
        }

        __irq_enter();
}

static inline void invoke_softirq(void)
{
        if (ksoftirqd_running())
                return;

        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack, which
                 * may already be deep. So run the softirq on its own stack
                 * to prevent any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
        trace_hardirq_exit(); /* must be last! */
}
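
/*
 * Illustrative sketch (hypothetical, architecture specific): interrupt
 * entry code typically brackets the flow handler with the two functions
 * above, roughly along the lines of:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// or a chained/flow handler
 *	irq_exit();			// may run softirqs via invoke_softirq()
 *
 * The exact entry path varies per architecture; the only contract relied
 * on here is that irq_exit() runs with the hardirq still accounted in
 * preempt_count and, on most architectures, with interrupts disabled.
 */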

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        lockdep_assert_irqs_disabled();

        t->next = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}
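
/*
 * Illustrative sketch (hypothetical names): the typical consumer of the
 * tasklet machinery above is a driver that defers the bulk of its
 * interrupt work, roughly:
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		...runs in softirq context on the scheduling CPU...
 *	}
 *
 *	// in probe/setup:
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *
 *	// from the hardirq handler:
 *	tasklet_schedule(&dev->tasklet);
 *
 *	// on teardown:
 *	tasklet_kill(&dev->tasklet);
 *
 * my_dev, my_tasklet_fn and the dev->tasklet member are hypothetical;
 * tasklet_schedule() comes from <linux/interrupt.h> and lands in
 * __tasklet_schedule()/tasklet_action() above, while tasklet_init() and
 * tasklet_kill() are defined just below.
 */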

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
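
/*
 * Illustrative sketch (hypothetical names): a user of the tasklet_hrtimer
 * helpers supplies an hrtimer-style callback that it wants run in softirq
 * rather than hardirq context, roughly:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		...softirq context...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ms_to_ktime(10), HRTIMER_MODE_REL);
 *
 * my_th and my_cb are hypothetical; tasklet_hrtimer_start() is the starter
 * helper declared alongside these in <linux/interrupt.h>.
 */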

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on the current stack, as we are
                 * not deep in the task stack here.
                 */
                __do_softirq();
                local_irq_enable();
                cond_resched_rcu_qs();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
        .store			= &ksoftirqd,
        .thread_should_run	= ksoftirqd_should_run,
        .thread_fn		= run_ksoftirqd,
        .thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}