/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get a sort of weak cpu binding, though it is
     still not clear whether it will result in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: each is serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * a softirq and whether we just have bh disabled.
 */

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call trace_preempt_off() later.
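	 *
	 * (Descriptive note: cnt is SOFTIRQ_DISABLE_OFFSET when we get here
	 * via local_bh_disable(), and SOFTIRQ_OFFSET when __do_softirq() or
	 * invoke_softirq() enters softirq processing.)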
	 */
	preempt_count() += cnt;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
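 *
 * (For example, a heavily loaded NET_RX softirq can keep re-raising
 * itself; after MAX_SOFTIRQ_RESTART passes the remaining work is
 * deferred to ksoftirqd so that user space still makes progress.)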
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirqs will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
		__do_softirq();
#else
		do_softirq();
#endif
	} else {
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
	sched_preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
361 * 362 * Otherwise we wake up ksoftirqd to make sure we 363 * schedule the softirq soon. 364 */ 365 if (!in_interrupt()) 366 wakeup_softirqd(); 367 } 368 369 void raise_softirq(unsigned int nr) 370 { 371 unsigned long flags; 372 373 local_irq_save(flags); 374 raise_softirq_irqoff(nr); 375 local_irq_restore(flags); 376 } 377 378 void __raise_softirq_irqoff(unsigned int nr) 379 { 380 trace_softirq_raise(nr); 381 or_softirq_pending(1UL << nr); 382 } 383 384 void open_softirq(int nr, void (*action)(struct softirq_action *)) 385 { 386 softirq_vec[nr].action = action; 387 } 388 389 /* 390 * Tasklets 391 */ 392 struct tasklet_head 393 { 394 struct tasklet_struct *head; 395 struct tasklet_struct **tail; 396 }; 397 398 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); 399 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); 400 401 void __tasklet_schedule(struct tasklet_struct *t) 402 { 403 unsigned long flags; 404 405 local_irq_save(flags); 406 t->next = NULL; 407 *__this_cpu_read(tasklet_vec.tail) = t; 408 __this_cpu_write(tasklet_vec.tail, &(t->next)); 409 raise_softirq_irqoff(TASKLET_SOFTIRQ); 410 local_irq_restore(flags); 411 } 412 413 EXPORT_SYMBOL(__tasklet_schedule); 414 415 void __tasklet_hi_schedule(struct tasklet_struct *t) 416 { 417 unsigned long flags; 418 419 local_irq_save(flags); 420 t->next = NULL; 421 *__this_cpu_read(tasklet_hi_vec.tail) = t; 422 __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); 423 raise_softirq_irqoff(HI_SOFTIRQ); 424 local_irq_restore(flags); 425 } 426 427 EXPORT_SYMBOL(__tasklet_hi_schedule); 428 429 void __tasklet_hi_schedule_first(struct tasklet_struct *t) 430 { 431 BUG_ON(!irqs_disabled()); 432 433 t->next = __this_cpu_read(tasklet_hi_vec.head); 434 __this_cpu_write(tasklet_hi_vec.head, t); 435 __raise_softirq_irqoff(HI_SOFTIRQ); 436 } 437 438 EXPORT_SYMBOL(__tasklet_hi_schedule_first); 439 440 static void tasklet_action(struct softirq_action *a) 441 { 442 struct tasklet_struct *list; 443 444 local_irq_disable(); 445 list = __this_cpu_read(tasklet_vec.head); 446 __this_cpu_write(tasklet_vec.head, NULL); 447 __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); 448 local_irq_enable(); 449 450 while (list) { 451 struct tasklet_struct *t = list; 452 453 list = list->next; 454 455 if (tasklet_trylock(t)) { 456 if (!atomic_read(&t->count)) { 457 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) 458 BUG(); 459 t->func(t->data); 460 tasklet_unlock(t); 461 continue; 462 } 463 tasklet_unlock(t); 464 } 465 466 local_irq_disable(); 467 t->next = NULL; 468 *__this_cpu_read(tasklet_vec.tail) = t; 469 __this_cpu_write(tasklet_vec.tail, &(t->next)); 470 __raise_softirq_irqoff(TASKLET_SOFTIRQ); 471 local_irq_enable(); 472 } 473 } 474 475 static void tasklet_hi_action(struct softirq_action *a) 476 { 477 struct tasklet_struct *list; 478 479 local_irq_disable(); 480 list = __this_cpu_read(tasklet_hi_vec.head); 481 __this_cpu_write(tasklet_hi_vec.head, NULL); 482 __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); 483 local_irq_enable(); 484 485 while (list) { 486 struct tasklet_struct *t = list; 487 488 list = list->next; 489 490 if (tasklet_trylock(t)) { 491 if (!atomic_read(&t->count)) { 492 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) 493 BUG(); 494 t->func(t->data); 495 tasklet_unlock(t); 496 continue; 497 } 498 tasklet_unlock(t); 499 } 500 501 local_irq_disable(); 502 t->next = NULL; 503 *__this_cpu_read(tasklet_hi_vec.tail) = t; 504 __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); 505 
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/*
	 * Trigger the softirq only if the list was previously empty.
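	 * If it was not empty, the softirq has already been raised and has
	 * not run yet, so that run will pick up this new entry as well.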
	 */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
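 *
 * Illustrative usage (a sketch, not taken from an in-tree caller): a
 * driver that embeds a struct call_single_data in its per-request data
 * could complete the request on the CPU that submitted it with
 *
 *	send_remote_softirq(&req->csd, req->submit_cpu, BLOCK_SOFTIRQ);
 *
 * where req, csd and submit_cpu are hypothetical names.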
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int run_ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			schedule_preempt_disabled();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			local_irq_disable();
			if (local_softirq_pending())
				__do_softirq();
			local_irq_enable();
			sched_preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_note_context_switch((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
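 *
 * Because @cpu is dead it can no longer touch its own tasklet list, so
 * the list can be walked and @t unlinked without taking any lock.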
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create_on_node(run_ksoftirqd,
					   hcpu,
					   cpu_to_node(hotcpu),
					   "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return notifier_from_errno(PTR_ERR(p));
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		static const struct sched_param param = {
			.sched_priority = MAX_RT_PRIO-1
		};

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err != NOTIFY_OK);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}
#endif