/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Instance node for a single multi-instance callback invocation
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @list:	List head for the instances of a multi-instance state
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
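/*
 * For illustration only (not part of the state tables below): the callbacks
 * stored in the startup/teardown unions use one of the two signatures above.
 * A hypothetical "foo" subsystem provides either a plain per-cpu callback
 *
 *	static int foo_online(unsigned int cpu)
 *	{
 *		return foo_enable_on(cpu);		// hypothetical helper
 *	}
 *
 * or, for a multi-instance state, a callback that also receives the instance
 * node which is embedded in the subsystem's own data structure:
 *
 *	static int foo_instance_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct foo *f = hlist_entry(node, struct foo, node);
 *
 *		return foo_enable_instance_on(f, cpu);	// hypothetical helper
 *	}
 */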
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to invoke
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance states, invoke the callback for this
 *		instance only; NULL means invoke it for all instances
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
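/*
 * Usage sketch (illustrative, not part of this file): a caller that needs the
 * set of online CPUs to stay stable while it walks them brackets the walk
 * with the read side of the hotplug lock:
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		init_something_on(cpu);		// hypothetical per-cpu setup
 *	cpus_read_unlock();
 *
 * Code which is itself only ever called with the lock already held (e.g. a
 * *_cpuslocked() variant) documents that with lockdep_assert_cpus_held().
 */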
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */
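/*
 * Usage sketch (illustrative): a driver that cannot tolerate CPUs coming or
 * going for an extended section, but should not hold the hotplug lock for
 * that long, can disable hotplug entirely:
 *
 *	cpu_hotplug_disable();
 *	// cpu_up()/cpu_down() now fail with -EBUSY, including via sysfs
 *	run_lengthy_calibration();		// hypothetical
 *	cpu_hotplug_enable();
 *
 * The calls nest via the cpu_hotplug_disabled counter, so they must be
 * strictly balanced.
 */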
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_completion(&st->done);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE) {
		__cpuhp_kick_ap_work(st);
		wait_for_completion(&st->done);
	}
	return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, st->target);
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	lock_map_acquire(&cpuhp_state_lock_map);
	/* Single callback invocation for [un]install ? */
	if (st->single) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	lock_map_release(&cpuhp_state_lock_map);
	st->result = ret;
	complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
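/*
 * Context note (illustrative, derived from cpuhp_thread_fun() above): an AP
 * state at or above CPUHP_AP_ONLINE is invoked from the per-cpu "cpuhp/%u"
 * thread with interrupts enabled and may therefore sleep, while AP states
 * below CPUHP_AP_ONLINE are invoked with interrupts disabled. A dynamically
 * registered online callback can thus safely do things like:
 *
 *	static int foo_cpu_online(unsigned int cpu)
 *	{
 *		mutex_lock(&foo_lock);		// hypothetical; sleeping is fine here
 *		foo_setup(cpu);
 *		mutex_unlock(&foo_lock);
 *		return 0;
 *	}
 */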
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

#else
#define takedown_cpu		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}
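/*
 * Rough arch-side shape (illustrative; the details vary per architecture):
 * the secondary startup path typically calls notify_cpu_starting() with
 * interrupts still disabled, marks itself online and then enters the idle
 * loop, which reports CPUHP_AP_ONLINE_IDLE back to the control CPU:
 *
 *	void secondary_start_kernel(void)		// hypothetical arch code
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		// ... low level per-cpu init ...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */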
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete(&st->done);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
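/*
 * Usage sketch (illustrative): in-kernel callers of cpu_up()/cpu_down() are
 * rare, most onlining and offlining happens through sysfs, but code that does
 * drive hotplug directly simply checks the return values:
 *
 *	ret = cpu_down(cpu);
 *	if (ret)
 *		pr_warn("could not offline CPU%u: %d\n", cpu, ret);
 *	// ... later ...
 *	ret = cpu_up(cpu);
 *
 * Both fail with -EBUSY while cpu_hotplug_disabled is set, and with -EINVAL
 * for CPUs which are not present.
 */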
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
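/*
 * Pairing sketch (illustrative): the suspend/hibernation core brackets the
 * "single CPU" phase with these two functions; disable_nonboot_cpus() is a
 * thin wrapper around freeze_secondary_cpus(0) declared in linux/cpu.h:
 *
 *	error = disable_nonboot_cpus();
 *	if (error)
 *		goto Enable_cpus;
 *	// ... enter the sleep state with only the boot CPU online ...
 * Enable_cpus:
 *	enable_nonboot_cpus();
 *
 * enable_nonboot_cpus() must be called even when freezing failed, because
 * freeze_secondary_cpus() bumps cpu_hotplug_disabled unconditionally.
 */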
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_DEAD] = {
		.name			= "timers:dead",
		.startup.single		= NULL,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on.
	 * Transient state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
/*
 * Returns a free slot for dynamic state assignment in the requested range.
 * The states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
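/*
 * Usage sketch (illustrative): a multi-instance state is registered once with
 * cpuhp_setup_state_multi() (a wrapper around __cpuhp_setup_state() declared
 * in linux/cpuhotplug.h); each object then embeds a hlist_node and adds
 * itself as an instance. The "foo" names and helpers are hypothetical:
 *
 *	struct foo_dev {
 *		struct hlist_node node;
 *		// ...
 *	};
 *
 *	static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct foo_dev *fd = hlist_entry(node, struct foo_dev, node);
 *
 *		return foo_dev_bind_cpu(fd, cpu);	// hypothetical
 *	}
 *
 *	// once, at subsystem init:
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
 *					foo_cpu_online, foo_cpu_offline);
 *
 *	// per device:
 *	cpuhp_state_add_instance(state, &fd->node);
 */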
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
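/*
 * Usage sketch (illustrative): most users go through the cpuhp_setup_state()
 * wrapper from linux/cpuhotplug.h. Registering a dynamic online state invokes
 * the startup callback on all CPUs which have already passed the new state
 * (i.e. the online ones) and returns the allocated state number, which is
 * later handed to cpuhp_remove_state(). The "foo" names are hypothetical:
 *
 *	static enum cpuhp_state foo_hp_state;
 *
 *	static int foo_cpu_online(unsigned int cpu)
 *	{
 *		return foo_start(cpu);			// hypothetical
 *	}
 *
 *	static int foo_cpu_offline(unsigned int cpu)
 *	{
 *		foo_stop(cpu);				// hypothetical
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *				foo_cpu_online, foo_cpu_offline);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;
 */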
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
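/*
 * Usage sketch (illustrative), matching the setup example above: on module
 * exit the state is removed again, which runs the teardown callback on every
 * CPU that is still past the state. Multi-instance users are expected to
 * remove each instance with cpuhp_state_remove_instance() first:
 *
 *	static void foo_exit(void)			// hypothetical
 *	{
 *		cpuhp_remove_state(foo_hp_state);
 *	}
 */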
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
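/*
 * The attribute groups above appear under /sys/devices/system/cpu/. For
 * example (illustrative):
 *
 *	/sys/devices/system/cpu/hotplug/states		list of named states
 *	/sys/devices/system/cpu/cpuN/hotplug/state	current state of cpuN
 *	/sys/devices/system/cpu/cpuN/hotplug/target	writable target state
 *
 * Writing 0 (CPUHP_OFFLINE) to 'target' offlines a CPU; writing intermediate
 * state numbers is only accepted when CONFIG_CPU_HOTPLUG_STATE_CONTROL is
 * enabled, see write_cpuhp_target() above.
 */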
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}