1 /* CPU control. 2 * (C) 2001, 2002, 2003, 2004 Rusty Russell 3 * 4 * This code is licenced under the GPL. 5 */ 6 #include <linux/sched/mm.h> 7 #include <linux/proc_fs.h> 8 #include <linux/smp.h> 9 #include <linux/init.h> 10 #include <linux/notifier.h> 11 #include <linux/sched/signal.h> 12 #include <linux/sched/hotplug.h> 13 #include <linux/sched/isolation.h> 14 #include <linux/sched/task.h> 15 #include <linux/sched/smt.h> 16 #include <linux/unistd.h> 17 #include <linux/cpu.h> 18 #include <linux/oom.h> 19 #include <linux/rcupdate.h> 20 #include <linux/export.h> 21 #include <linux/bug.h> 22 #include <linux/kthread.h> 23 #include <linux/stop_machine.h> 24 #include <linux/mutex.h> 25 #include <linux/gfp.h> 26 #include <linux/suspend.h> 27 #include <linux/lockdep.h> 28 #include <linux/tick.h> 29 #include <linux/irq.h> 30 #include <linux/nmi.h> 31 #include <linux/smpboot.h> 32 #include <linux/relay.h> 33 #include <linux/slab.h> 34 #include <linux/scs.h> 35 #include <linux/percpu-rwsem.h> 36 #include <linux/cpuset.h> 37 #include <linux/random.h> 38 39 #include <trace/events/power.h> 40 #define CREATE_TRACE_POINTS 41 #include <trace/events/cpuhp.h> 42 43 #include "smpboot.h" 44 45 /** 46 * struct cpuhp_cpu_state - Per cpu hotplug state storage 47 * @state: The current cpu state 48 * @target: The target state 49 * @fail: Current CPU hotplug callback state 50 * @thread: Pointer to the hotplug thread 51 * @should_run: Thread should execute 52 * @rollback: Perform a rollback 53 * @single: Single callback invocation 54 * @bringup: Single callback bringup or teardown selector 55 * @cpu: CPU number 56 * @node: Remote CPU node; for multi-instance, do a 57 * single entry callback for install/remove 58 * @last: For multi-instance rollback, remember how far we got 59 * @cb_state: The state for a single callback (install/uninstall) 60 * @result: Result of the operation 61 * @done_up: Signal completion to the issuer of the task for cpu-up 62 * @done_down: Signal completion to the issuer of the task for cpu-down 63 */ 64 struct cpuhp_cpu_state { 65 enum cpuhp_state state; 66 enum cpuhp_state target; 67 enum cpuhp_state fail; 68 #ifdef CONFIG_SMP 69 struct task_struct *thread; 70 bool should_run; 71 bool rollback; 72 bool single; 73 bool bringup; 74 struct hlist_node *node; 75 struct hlist_node *last; 76 enum cpuhp_state cb_state; 77 int result; 78 struct completion done_up; 79 struct completion done_down; 80 #endif 81 }; 82 83 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { 84 .fail = CPUHP_INVALID, 85 }; 86 87 #ifdef CONFIG_SMP 88 cpumask_t cpus_booted_once_mask; 89 #endif 90 91 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) 92 static struct lockdep_map cpuhp_state_up_map = 93 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); 94 static struct lockdep_map cpuhp_state_down_map = 95 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map); 96 97 98 static inline void cpuhp_lock_acquire(bool bringup) 99 { 100 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); 101 } 102 103 static inline void cpuhp_lock_release(bool bringup) 104 { 105 lock_map_release(bringup ? 
&cpuhp_state_up_map : &cpuhp_state_down_map); 106 } 107 #else 108 109 static inline void cpuhp_lock_acquire(bool bringup) { } 110 static inline void cpuhp_lock_release(bool bringup) { } 111 112 #endif 113 114 /** 115 * struct cpuhp_step - Hotplug state machine step 116 * @name: Name of the step 117 * @startup: Startup function of the step 118 * @teardown: Teardown function of the step 119 * @cant_stop: Bringup/teardown can't be stopped at this step 120 * @multi_instance: State has multiple instances which get added afterwards 121 */ 122 struct cpuhp_step { 123 const char *name; 124 union { 125 int (*single)(unsigned int cpu); 126 int (*multi)(unsigned int cpu, 127 struct hlist_node *node); 128 } startup; 129 union { 130 int (*single)(unsigned int cpu); 131 int (*multi)(unsigned int cpu, 132 struct hlist_node *node); 133 } teardown; 134 /* private: */ 135 struct hlist_head list; 136 /* public: */ 137 bool cant_stop; 138 bool multi_instance; 139 }; 140 141 static DEFINE_MUTEX(cpuhp_state_mutex); 142 static struct cpuhp_step cpuhp_hp_states[]; 143 144 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state) 145 { 146 return cpuhp_hp_states + state; 147 } 148 149 static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step) 150 { 151 return bringup ? !step->startup.single : !step->teardown.single; 152 } 153 154 /** 155 * cpuhp_invoke_callback - Invoke the callbacks for a given state 156 * @cpu: The cpu for which the callback should be invoked 157 * @state: The state to do callbacks for 158 * @bringup: True if the bringup callback should be invoked 159 * @node: For multi-instance, do a single entry callback for install/remove 160 * @lastp: For multi-instance rollback, remember how far we got 161 * 162 * Called from cpu hotplug and from the state register machinery. 163 * 164 * Return: %0 on success or a negative errno code 165 */ 166 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, 167 bool bringup, struct hlist_node *node, 168 struct hlist_node **lastp) 169 { 170 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 171 struct cpuhp_step *step = cpuhp_get_step(state); 172 int (*cbm)(unsigned int cpu, struct hlist_node *node); 173 int (*cb)(unsigned int cpu); 174 int ret, cnt; 175 176 if (st->fail == state) { 177 st->fail = CPUHP_INVALID; 178 return -EAGAIN; 179 } 180 181 if (cpuhp_step_empty(bringup, step)) { 182 WARN_ON_ONCE(1); 183 return 0; 184 } 185 186 if (!step->multi_instance) { 187 WARN_ON_ONCE(lastp && *lastp); 188 cb = bringup ? step->startup.single : step->teardown.single; 189 190 trace_cpuhp_enter(cpu, st->target, state, cb); 191 ret = cb(cpu); 192 trace_cpuhp_exit(cpu, st->state, state, ret); 193 return ret; 194 } 195 cbm = bringup ? step->startup.multi : step->teardown.multi; 196 197 /* Single invocation for instance add/remove */ 198 if (node) { 199 WARN_ON_ONCE(lastp && *lastp); 200 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); 201 ret = cbm(cpu, node); 202 trace_cpuhp_exit(cpu, st->state, state, ret); 203 return ret; 204 } 205 206 /* State transition. 
Invoke on all instances */ 207 cnt = 0; 208 hlist_for_each(node, &step->list) { 209 if (lastp && node == *lastp) 210 break; 211 212 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); 213 ret = cbm(cpu, node); 214 trace_cpuhp_exit(cpu, st->state, state, ret); 215 if (ret) { 216 if (!lastp) 217 goto err; 218 219 *lastp = node; 220 return ret; 221 } 222 cnt++; 223 } 224 if (lastp) 225 *lastp = NULL; 226 return 0; 227 err: 228 /* Rollback the instances if one failed */ 229 cbm = !bringup ? step->startup.multi : step->teardown.multi; 230 if (!cbm) 231 return ret; 232 233 hlist_for_each(node, &step->list) { 234 if (!cnt--) 235 break; 236 237 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); 238 ret = cbm(cpu, node); 239 trace_cpuhp_exit(cpu, st->state, state, ret); 240 /* 241 * Rollback must not fail, 242 */ 243 WARN_ON_ONCE(ret); 244 } 245 return ret; 246 } 247 248 #ifdef CONFIG_SMP 249 static bool cpuhp_is_ap_state(enum cpuhp_state state) 250 { 251 /* 252 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation 253 * purposes as that state is handled explicitly in cpu_down. 254 */ 255 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU; 256 } 257 258 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup) 259 { 260 struct completion *done = bringup ? &st->done_up : &st->done_down; 261 wait_for_completion(done); 262 } 263 264 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup) 265 { 266 struct completion *done = bringup ? &st->done_up : &st->done_down; 267 complete(done); 268 } 269 270 /* 271 * The former STARTING/DYING states, ran with IRQs disabled and must not fail. 272 */ 273 static bool cpuhp_is_atomic_state(enum cpuhp_state state) 274 { 275 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; 276 } 277 278 /* Serializes the updates to cpu_online_mask, cpu_present_mask */ 279 static DEFINE_MUTEX(cpu_add_remove_lock); 280 bool cpuhp_tasks_frozen; 281 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen); 282 283 /* 284 * The following two APIs (cpu_maps_update_begin/done) must be used when 285 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. 286 */ 287 void cpu_maps_update_begin(void) 288 { 289 mutex_lock(&cpu_add_remove_lock); 290 } 291 292 void cpu_maps_update_done(void) 293 { 294 mutex_unlock(&cpu_add_remove_lock); 295 } 296 297 /* 298 * If set, cpu_up and cpu_down will return -EBUSY and do nothing. 299 * Should always be manipulated under cpu_add_remove_lock 300 */ 301 static int cpu_hotplug_disabled; 302 303 #ifdef CONFIG_HOTPLUG_CPU 304 305 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); 306 307 void cpus_read_lock(void) 308 { 309 percpu_down_read(&cpu_hotplug_lock); 310 } 311 EXPORT_SYMBOL_GPL(cpus_read_lock); 312 313 int cpus_read_trylock(void) 314 { 315 return percpu_down_read_trylock(&cpu_hotplug_lock); 316 } 317 EXPORT_SYMBOL_GPL(cpus_read_trylock); 318 319 void cpus_read_unlock(void) 320 { 321 percpu_up_read(&cpu_hotplug_lock); 322 } 323 EXPORT_SYMBOL_GPL(cpus_read_unlock); 324 325 void cpus_write_lock(void) 326 { 327 percpu_down_write(&cpu_hotplug_lock); 328 } 329 330 void cpus_write_unlock(void) 331 { 332 percpu_up_write(&cpu_hotplug_lock); 333 } 334 335 void lockdep_assert_cpus_held(void) 336 { 337 /* 338 * We can't have hotplug operations before userspace starts running, 339 * and some init codepaths will knowingly not take the hotplug lock. 340 * This is all valid, so mute lockdep until it makes sense to report 341 * unheld locks. 
342 */ 343 if (system_state < SYSTEM_RUNNING) 344 return; 345 346 percpu_rwsem_assert_held(&cpu_hotplug_lock); 347 } 348 349 #ifdef CONFIG_LOCKDEP 350 int lockdep_is_cpus_held(void) 351 { 352 return percpu_rwsem_is_held(&cpu_hotplug_lock); 353 } 354 #endif 355 356 static void lockdep_acquire_cpus_lock(void) 357 { 358 rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_); 359 } 360 361 static void lockdep_release_cpus_lock(void) 362 { 363 rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_); 364 } 365 366 /* 367 * Wait for currently running CPU hotplug operations to complete (if any) and 368 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects 369 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the 370 * hotplug path before performing hotplug operations. So acquiring that lock 371 * guarantees mutual exclusion from any currently running hotplug operations. 372 */ 373 void cpu_hotplug_disable(void) 374 { 375 cpu_maps_update_begin(); 376 cpu_hotplug_disabled++; 377 cpu_maps_update_done(); 378 } 379 EXPORT_SYMBOL_GPL(cpu_hotplug_disable); 380 381 static void __cpu_hotplug_enable(void) 382 { 383 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n")) 384 return; 385 cpu_hotplug_disabled--; 386 } 387 388 void cpu_hotplug_enable(void) 389 { 390 cpu_maps_update_begin(); 391 __cpu_hotplug_enable(); 392 cpu_maps_update_done(); 393 } 394 EXPORT_SYMBOL_GPL(cpu_hotplug_enable); 395 396 #else 397 398 static void lockdep_acquire_cpus_lock(void) 399 { 400 } 401 402 static void lockdep_release_cpus_lock(void) 403 { 404 } 405 406 #endif /* CONFIG_HOTPLUG_CPU */ 407 408 /* 409 * Architectures that need SMT-specific errata handling during SMT hotplug 410 * should override this. 411 */ 412 void __weak arch_smt_update(void) { } 413 414 #ifdef CONFIG_HOTPLUG_SMT 415 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; 416 417 void __init cpu_smt_disable(bool force) 418 { 419 if (!cpu_smt_possible()) 420 return; 421 422 if (force) { 423 pr_info("SMT: Force disabled\n"); 424 cpu_smt_control = CPU_SMT_FORCE_DISABLED; 425 } else { 426 pr_info("SMT: disabled\n"); 427 cpu_smt_control = CPU_SMT_DISABLED; 428 } 429 } 430 431 /* 432 * The decision whether SMT is supported can only be done after the full 433 * CPU identification. Called from architecture code. 434 */ 435 void __init cpu_smt_check_topology(void) 436 { 437 if (!topology_smt_supported()) 438 cpu_smt_control = CPU_SMT_NOT_SUPPORTED; 439 } 440 441 static int __init smt_cmdline_disable(char *str) 442 { 443 cpu_smt_disable(str && !strcmp(str, "force")); 444 return 0; 445 } 446 early_param("nosmt", smt_cmdline_disable); 447 448 static inline bool cpu_smt_allowed(unsigned int cpu) 449 { 450 if (cpu_smt_control == CPU_SMT_ENABLED) 451 return true; 452 453 if (topology_is_primary_thread(cpu)) 454 return true; 455 456 /* 457 * On x86 it's required to boot all logical CPUs at least once so 458 * that the init code can get a chance to set CR4.MCE on each 459 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any 460 * core will shutdown the machine. 
461 */ 462 return !cpumask_test_cpu(cpu, &cpus_booted_once_mask); 463 } 464 465 /* Returns true if SMT is not supported of forcefully (irreversibly) disabled */ 466 bool cpu_smt_possible(void) 467 { 468 return cpu_smt_control != CPU_SMT_FORCE_DISABLED && 469 cpu_smt_control != CPU_SMT_NOT_SUPPORTED; 470 } 471 EXPORT_SYMBOL_GPL(cpu_smt_possible); 472 #else 473 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } 474 #endif 475 476 static inline enum cpuhp_state 477 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target) 478 { 479 enum cpuhp_state prev_state = st->state; 480 bool bringup = st->state < target; 481 482 st->rollback = false; 483 st->last = NULL; 484 485 st->target = target; 486 st->single = false; 487 st->bringup = bringup; 488 if (cpu_dying(cpu) != !bringup) 489 set_cpu_dying(cpu, !bringup); 490 491 return prev_state; 492 } 493 494 static inline void 495 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st, 496 enum cpuhp_state prev_state) 497 { 498 bool bringup = !st->bringup; 499 500 st->target = prev_state; 501 502 /* 503 * Already rolling back. No need invert the bringup value or to change 504 * the current state. 505 */ 506 if (st->rollback) 507 return; 508 509 st->rollback = true; 510 511 /* 512 * If we have st->last we need to undo partial multi_instance of this 513 * state first. Otherwise start undo at the previous state. 514 */ 515 if (!st->last) { 516 if (st->bringup) 517 st->state--; 518 else 519 st->state++; 520 } 521 522 st->bringup = bringup; 523 if (cpu_dying(cpu) != !bringup) 524 set_cpu_dying(cpu, !bringup); 525 } 526 527 /* Regular hotplug invocation of the AP hotplug thread */ 528 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st) 529 { 530 if (!st->single && st->state == st->target) 531 return; 532 533 st->result = 0; 534 /* 535 * Make sure the above stores are visible before should_run becomes 536 * true. Paired with the mb() above in cpuhp_thread_fun() 537 */ 538 smp_mb(); 539 st->should_run = true; 540 wake_up_process(st->thread); 541 wait_for_ap_thread(st, st->bringup); 542 } 543 544 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st, 545 enum cpuhp_state target) 546 { 547 enum cpuhp_state prev_state; 548 int ret; 549 550 prev_state = cpuhp_set_state(cpu, st, target); 551 __cpuhp_kick_ap(st); 552 if ((ret = st->result)) { 553 cpuhp_reset_state(cpu, st, prev_state); 554 __cpuhp_kick_ap(st); 555 } 556 557 return ret; 558 } 559 560 static int bringup_wait_for_ap(unsigned int cpu) 561 { 562 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 563 564 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ 565 wait_for_ap_thread(st, true); 566 if (WARN_ON_ONCE((!cpu_online(cpu)))) 567 return -ECANCELED; 568 569 /* Unpark the hotplug thread of the target cpu */ 570 kthread_unpark(st->thread); 571 572 /* 573 * SMT soft disabling on X86 requires to bring the CPU out of the 574 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The 575 * CPU marked itself as booted_once in notify_cpu_starting() so the 576 * cpu_smt_allowed() check will now return false if this is not the 577 * primary sibling. 578 */ 579 if (!cpu_smt_allowed(cpu)) 580 return -ECANCELED; 581 582 if (st->target <= CPUHP_AP_ONLINE_IDLE) 583 return 0; 584 585 return cpuhp_kick_ap(cpu, st, st->target); 586 } 587 588 static int bringup_cpu(unsigned int cpu) 589 { 590 struct task_struct *idle = idle_thread_get(cpu); 591 int ret; 592 593 /* 594 * Reset stale stack state from the last time this CPU was online. 
595 */ 596 scs_task_reset(idle); 597 kasan_unpoison_task_stack(idle); 598 599 /* 600 * Some architectures have to walk the irq descriptors to 601 * setup the vector space for the cpu which comes online. 602 * Prevent irq alloc/free across the bringup. 603 */ 604 irq_lock_sparse(); 605 606 /* Arch-specific enabling code. */ 607 ret = __cpu_up(cpu, idle); 608 irq_unlock_sparse(); 609 if (ret) 610 return ret; 611 return bringup_wait_for_ap(cpu); 612 } 613 614 static int finish_cpu(unsigned int cpu) 615 { 616 struct task_struct *idle = idle_thread_get(cpu); 617 struct mm_struct *mm = idle->active_mm; 618 619 /* 620 * idle_task_exit() will have switched to &init_mm, now 621 * clean up any remaining active_mm state. 622 */ 623 if (mm != &init_mm) 624 idle->active_mm = &init_mm; 625 mmdrop(mm); 626 return 0; 627 } 628 629 /* 630 * Hotplug state machine related functions 631 */ 632 633 /* 634 * Get the next state to run. Empty ones will be skipped. Returns true if a 635 * state must be run. 636 * 637 * st->state will be modified ahead of time, to match state_to_run, as if it 638 * has already ran. 639 */ 640 static bool cpuhp_next_state(bool bringup, 641 enum cpuhp_state *state_to_run, 642 struct cpuhp_cpu_state *st, 643 enum cpuhp_state target) 644 { 645 do { 646 if (bringup) { 647 if (st->state >= target) 648 return false; 649 650 *state_to_run = ++st->state; 651 } else { 652 if (st->state <= target) 653 return false; 654 655 *state_to_run = st->state--; 656 } 657 658 if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run))) 659 break; 660 } while (true); 661 662 return true; 663 } 664 665 static int cpuhp_invoke_callback_range(bool bringup, 666 unsigned int cpu, 667 struct cpuhp_cpu_state *st, 668 enum cpuhp_state target) 669 { 670 enum cpuhp_state state; 671 int err = 0; 672 673 while (cpuhp_next_state(bringup, &state, st, target)) { 674 err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL); 675 if (err) 676 break; 677 } 678 679 return err; 680 } 681 682 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st) 683 { 684 if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) 685 return true; 686 /* 687 * When CPU hotplug is disabled, then taking the CPU down is not 688 * possible because takedown_cpu() and the architecture and 689 * subsystem specific mechanisms are not available. So the CPU 690 * which would be completely unplugged again needs to stay around 691 * in the current state. 692 */ 693 return st->state <= CPUHP_BRINGUP_CPU; 694 } 695 696 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 697 enum cpuhp_state target) 698 { 699 enum cpuhp_state prev_state = st->state; 700 int ret = 0; 701 702 ret = cpuhp_invoke_callback_range(true, cpu, st, target); 703 if (ret) { 704 pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n", 705 ret, cpu, cpuhp_get_step(st->state)->name, 706 st->state); 707 708 cpuhp_reset_state(cpu, st, prev_state); 709 if (can_rollback_cpu(st)) 710 WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, 711 prev_state)); 712 } 713 return ret; 714 } 715 716 /* 717 * The cpu hotplug threads manage the bringup and teardown of the cpus 718 */ 719 static int cpuhp_should_run(unsigned int cpu) 720 { 721 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 722 723 return st->should_run; 724 } 725 726 /* 727 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke 728 * callbacks when a state gets [un]installed at runtime. 729 * 730 * Each invocation of this function by the smpboot thread does a single AP 731 * state callback. 
732 * 733 * It has 3 modes of operation: 734 * - single: runs st->cb_state 735 * - up: runs ++st->state, while st->state < st->target 736 * - down: runs st->state--, while st->state > st->target 737 * 738 * When complete or on error, should_run is cleared and the completion is fired. 739 */ 740 static void cpuhp_thread_fun(unsigned int cpu) 741 { 742 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 743 bool bringup = st->bringup; 744 enum cpuhp_state state; 745 746 if (WARN_ON_ONCE(!st->should_run)) 747 return; 748 749 /* 750 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures 751 * that if we see ->should_run we also see the rest of the state. 752 */ 753 smp_mb(); 754 755 /* 756 * The BP holds the hotplug lock, but we're now running on the AP, 757 * ensure that anybody asserting the lock is held, will actually find 758 * it so. 759 */ 760 lockdep_acquire_cpus_lock(); 761 cpuhp_lock_acquire(bringup); 762 763 if (st->single) { 764 state = st->cb_state; 765 st->should_run = false; 766 } else { 767 st->should_run = cpuhp_next_state(bringup, &state, st, st->target); 768 if (!st->should_run) 769 goto end; 770 } 771 772 WARN_ON_ONCE(!cpuhp_is_ap_state(state)); 773 774 if (cpuhp_is_atomic_state(state)) { 775 local_irq_disable(); 776 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); 777 local_irq_enable(); 778 779 /* 780 * STARTING/DYING must not fail! 781 */ 782 WARN_ON_ONCE(st->result); 783 } else { 784 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); 785 } 786 787 if (st->result) { 788 /* 789 * If we fail on a rollback, we're up a creek without no 790 * paddle, no way forward, no way back. We loose, thanks for 791 * playing. 792 */ 793 WARN_ON_ONCE(st->rollback); 794 st->should_run = false; 795 } 796 797 end: 798 cpuhp_lock_release(bringup); 799 lockdep_release_cpus_lock(); 800 801 if (!st->should_run) 802 complete_ap_thread(st, bringup); 803 } 804 805 /* Invoke a single callback on a remote cpu */ 806 static int 807 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, 808 struct hlist_node *node) 809 { 810 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 811 int ret; 812 813 if (!cpu_online(cpu)) 814 return 0; 815 816 cpuhp_lock_acquire(false); 817 cpuhp_lock_release(false); 818 819 cpuhp_lock_acquire(true); 820 cpuhp_lock_release(true); 821 822 /* 823 * If we are up and running, use the hotplug thread. For early calls 824 * we invoke the thread function directly. 825 */ 826 if (!st->thread) 827 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); 828 829 st->rollback = false; 830 st->last = NULL; 831 832 st->node = node; 833 st->bringup = bringup; 834 st->cb_state = state; 835 st->single = true; 836 837 __cpuhp_kick_ap(st); 838 839 /* 840 * If we failed and did a partial, do a rollback. 841 */ 842 if ((ret = st->result) && st->last) { 843 st->rollback = true; 844 st->bringup = !bringup; 845 846 __cpuhp_kick_ap(st); 847 } 848 849 /* 850 * Clean up the leftovers so the next hotplug operation wont use stale 851 * data. 
852 */ 853 st->node = st->last = NULL; 854 return ret; 855 } 856 857 static int cpuhp_kick_ap_work(unsigned int cpu) 858 { 859 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 860 enum cpuhp_state prev_state = st->state; 861 int ret; 862 863 cpuhp_lock_acquire(false); 864 cpuhp_lock_release(false); 865 866 cpuhp_lock_acquire(true); 867 cpuhp_lock_release(true); 868 869 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); 870 ret = cpuhp_kick_ap(cpu, st, st->target); 871 trace_cpuhp_exit(cpu, st->state, prev_state, ret); 872 873 return ret; 874 } 875 876 static struct smp_hotplug_thread cpuhp_threads = { 877 .store = &cpuhp_state.thread, 878 .thread_should_run = cpuhp_should_run, 879 .thread_fn = cpuhp_thread_fun, 880 .thread_comm = "cpuhp/%u", 881 .selfparking = true, 882 }; 883 884 static __init void cpuhp_init_state(void) 885 { 886 struct cpuhp_cpu_state *st; 887 int cpu; 888 889 for_each_possible_cpu(cpu) { 890 st = per_cpu_ptr(&cpuhp_state, cpu); 891 init_completion(&st->done_up); 892 init_completion(&st->done_down); 893 } 894 } 895 896 void __init cpuhp_threads_init(void) 897 { 898 cpuhp_init_state(); 899 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads)); 900 kthread_unpark(this_cpu_read(cpuhp_state.thread)); 901 } 902 903 /* 904 * 905 * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock 906 * protected region. 907 * 908 * The operation is still serialized against concurrent CPU hotplug via 909 * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_ 910 * serialized against other hotplug related activity like adding or 911 * removing of state callbacks and state instances, which invoke either the 912 * startup or the teardown callback of the affected state. 913 * 914 * This is required for subsystems which are unfixable vs. CPU hotplug and 915 * evade lock inversion problems by scheduling work which has to be 916 * completed _before_ cpu_up()/_cpu_down() returns. 917 * 918 * Don't even think about adding anything to this for any new code or even 919 * drivers. It's only purpose is to keep existing lock order trainwrecks 920 * working. 921 * 922 * For cpu_down() there might be valid reasons to finish cleanups which are 923 * not required to be done under cpu_hotplug_lock, but that's a different 924 * story and would be not invoked via this. 925 */ 926 static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen) 927 { 928 /* 929 * cpusets delegate hotplug operations to a worker to "solve" the 930 * lock order problems. Wait for the worker, but only if tasks are 931 * _not_ frozen (suspend, hibernate) as that would wait forever. 932 * 933 * The wait is required because otherwise the hotplug operation 934 * returns with inconsistent state, which could even be observed in 935 * user space when a new CPU is brought up. The CPU plug uevent 936 * would be delivered and user space reacting on it would fail to 937 * move tasks to the newly plugged CPU up to the point where the 938 * work has finished because up to that point the newly plugged CPU 939 * is not assignable in cpusets/cgroups. On unplug that's not 940 * necessarily a visible issue, but it is still inconsistent state, 941 * which is the real problem which needs to be "fixed". This can't 942 * prevent the transient state between scheduling the work and 943 * returning from waiting for it. 
944 */ 945 if (!tasks_frozen) 946 cpuset_wait_for_hotplug(); 947 } 948 949 #ifdef CONFIG_HOTPLUG_CPU 950 #ifndef arch_clear_mm_cpumask_cpu 951 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) 952 #endif 953 954 /** 955 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU 956 * @cpu: a CPU id 957 * 958 * This function walks all processes, finds a valid mm struct for each one and 959 * then clears a corresponding bit in mm's cpumask. While this all sounds 960 * trivial, there are various non-obvious corner cases, which this function 961 * tries to solve in a safe manner. 962 * 963 * Also note that the function uses a somewhat relaxed locking scheme, so it may 964 * be called only for an already offlined CPU. 965 */ 966 void clear_tasks_mm_cpumask(int cpu) 967 { 968 struct task_struct *p; 969 970 /* 971 * This function is called after the cpu is taken down and marked 972 * offline, so its not like new tasks will ever get this cpu set in 973 * their mm mask. -- Peter Zijlstra 974 * Thus, we may use rcu_read_lock() here, instead of grabbing 975 * full-fledged tasklist_lock. 976 */ 977 WARN_ON(cpu_online(cpu)); 978 rcu_read_lock(); 979 for_each_process(p) { 980 struct task_struct *t; 981 982 /* 983 * Main thread might exit, but other threads may still have 984 * a valid mm. Find one. 985 */ 986 t = find_lock_task_mm(p); 987 if (!t) 988 continue; 989 arch_clear_mm_cpumask_cpu(cpu, t->mm); 990 task_unlock(t); 991 } 992 rcu_read_unlock(); 993 } 994 995 /* Take this CPU down. */ 996 static int take_cpu_down(void *_param) 997 { 998 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 999 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); 1000 int err, cpu = smp_processor_id(); 1001 int ret; 1002 1003 /* Ensure this CPU doesn't handle any more interrupts. */ 1004 err = __cpu_disable(); 1005 if (err < 0) 1006 return err; 1007 1008 /* 1009 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going 1010 * down, that the current state is CPUHP_TEARDOWN_CPU - 1. 1011 */ 1012 WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1)); 1013 1014 /* Invoke the former CPU_DYING callbacks */ 1015 ret = cpuhp_invoke_callback_range(false, cpu, st, target); 1016 1017 /* 1018 * DYING must not fail! 1019 */ 1020 WARN_ON_ONCE(ret); 1021 1022 /* Give up timekeeping duties */ 1023 tick_handover_do_timer(); 1024 /* Remove CPU from timer broadcasting */ 1025 tick_offline_cpu(cpu); 1026 /* Park the stopper thread */ 1027 stop_machine_park(cpu); 1028 return 0; 1029 } 1030 1031 static int takedown_cpu(unsigned int cpu) 1032 { 1033 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1034 int err; 1035 1036 /* Park the smpboot threads */ 1037 kthread_park(st->thread); 1038 1039 /* 1040 * Prevent irq alloc/free while the dying cpu reorganizes the 1041 * interrupt affinities. 1042 */ 1043 irq_lock_sparse(); 1044 1045 /* 1046 * So now all preempt/rcu users must observe !cpu_active(). 1047 */ 1048 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); 1049 if (err) { 1050 /* CPU refused to die */ 1051 irq_unlock_sparse(); 1052 /* Unpark the hotplug thread so we can rollback there */ 1053 kthread_unpark(st->thread); 1054 return err; 1055 } 1056 BUG_ON(cpu_online(cpu)); 1057 1058 /* 1059 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed 1060 * all runnable tasks from the CPU, there's only the idle task left now 1061 * that the migration thread is done doing the stop_machine thing. 
1062 * 1063 * Wait for the stop thread to go away. 1064 */ 1065 wait_for_ap_thread(st, false); 1066 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); 1067 1068 /* Interrupts are moved away from the dying cpu, reenable alloc/free */ 1069 irq_unlock_sparse(); 1070 1071 hotplug_cpu__broadcast_tick_pull(cpu); 1072 /* This actually kills the CPU. */ 1073 __cpu_die(cpu); 1074 1075 tick_cleanup_dead_cpu(cpu); 1076 rcutree_migrate_callbacks(cpu); 1077 return 0; 1078 } 1079 1080 static void cpuhp_complete_idle_dead(void *arg) 1081 { 1082 struct cpuhp_cpu_state *st = arg; 1083 1084 complete_ap_thread(st, false); 1085 } 1086 1087 void cpuhp_report_idle_dead(void) 1088 { 1089 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 1090 1091 BUG_ON(st->state != CPUHP_AP_OFFLINE); 1092 rcu_report_dead(smp_processor_id()); 1093 st->state = CPUHP_AP_IDLE_DEAD; 1094 /* 1095 * We cannot call complete after rcu_report_dead() so we delegate it 1096 * to an online cpu. 1097 */ 1098 smp_call_function_single(cpumask_first(cpu_online_mask), 1099 cpuhp_complete_idle_dead, st, 0); 1100 } 1101 1102 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 1103 enum cpuhp_state target) 1104 { 1105 enum cpuhp_state prev_state = st->state; 1106 int ret = 0; 1107 1108 ret = cpuhp_invoke_callback_range(false, cpu, st, target); 1109 if (ret) { 1110 pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n", 1111 ret, cpu, cpuhp_get_step(st->state)->name, 1112 st->state); 1113 1114 cpuhp_reset_state(cpu, st, prev_state); 1115 1116 if (st->state < prev_state) 1117 WARN_ON(cpuhp_invoke_callback_range(true, cpu, st, 1118 prev_state)); 1119 } 1120 1121 return ret; 1122 } 1123 1124 /* Requires cpu_add_remove_lock to be held */ 1125 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, 1126 enum cpuhp_state target) 1127 { 1128 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1129 int prev_state, ret = 0; 1130 1131 if (num_online_cpus() == 1) 1132 return -EBUSY; 1133 1134 if (!cpu_present(cpu)) 1135 return -EINVAL; 1136 1137 cpus_write_lock(); 1138 1139 cpuhp_tasks_frozen = tasks_frozen; 1140 1141 prev_state = cpuhp_set_state(cpu, st, target); 1142 /* 1143 * If the current CPU state is in the range of the AP hotplug thread, 1144 * then we need to kick the thread. 1145 */ 1146 if (st->state > CPUHP_TEARDOWN_CPU) { 1147 st->target = max((int)target, CPUHP_TEARDOWN_CPU); 1148 ret = cpuhp_kick_ap_work(cpu); 1149 /* 1150 * The AP side has done the error rollback already. Just 1151 * return the error code.. 1152 */ 1153 if (ret) 1154 goto out; 1155 1156 /* 1157 * We might have stopped still in the range of the AP hotplug 1158 * thread. Nothing to do anymore. 1159 */ 1160 if (st->state > CPUHP_TEARDOWN_CPU) 1161 goto out; 1162 1163 st->target = target; 1164 } 1165 /* 1166 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need 1167 * to do the further cleanups. 1168 */ 1169 ret = cpuhp_down_callbacks(cpu, st, target); 1170 if (ret && st->state < prev_state) { 1171 if (st->state == CPUHP_TEARDOWN_CPU) { 1172 cpuhp_reset_state(cpu, st, prev_state); 1173 __cpuhp_kick_ap(st); 1174 } else { 1175 WARN(1, "DEAD callback error for CPU%d", cpu); 1176 } 1177 } 1178 1179 out: 1180 cpus_write_unlock(); 1181 /* 1182 * Do post unplug cleanup. This is still protected against 1183 * concurrent CPU hotplug via cpu_add_remove_lock. 
1184 */ 1185 lockup_detector_cleanup(); 1186 arch_smt_update(); 1187 cpu_up_down_serialize_trainwrecks(tasks_frozen); 1188 return ret; 1189 } 1190 1191 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) 1192 { 1193 if (cpu_hotplug_disabled) 1194 return -EBUSY; 1195 return _cpu_down(cpu, 0, target); 1196 } 1197 1198 static int cpu_down(unsigned int cpu, enum cpuhp_state target) 1199 { 1200 int err; 1201 1202 cpu_maps_update_begin(); 1203 err = cpu_down_maps_locked(cpu, target); 1204 cpu_maps_update_done(); 1205 return err; 1206 } 1207 1208 /** 1209 * cpu_device_down - Bring down a cpu device 1210 * @dev: Pointer to the cpu device to offline 1211 * 1212 * This function is meant to be used by device core cpu subsystem only. 1213 * 1214 * Other subsystems should use remove_cpu() instead. 1215 * 1216 * Return: %0 on success or a negative errno code 1217 */ 1218 int cpu_device_down(struct device *dev) 1219 { 1220 return cpu_down(dev->id, CPUHP_OFFLINE); 1221 } 1222 1223 int remove_cpu(unsigned int cpu) 1224 { 1225 int ret; 1226 1227 lock_device_hotplug(); 1228 ret = device_offline(get_cpu_device(cpu)); 1229 unlock_device_hotplug(); 1230 1231 return ret; 1232 } 1233 EXPORT_SYMBOL_GPL(remove_cpu); 1234 1235 void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) 1236 { 1237 unsigned int cpu; 1238 int error; 1239 1240 cpu_maps_update_begin(); 1241 1242 /* 1243 * Make certain the cpu I'm about to reboot on is online. 1244 * 1245 * This is inline to what migrate_to_reboot_cpu() already do. 1246 */ 1247 if (!cpu_online(primary_cpu)) 1248 primary_cpu = cpumask_first(cpu_online_mask); 1249 1250 for_each_online_cpu(cpu) { 1251 if (cpu == primary_cpu) 1252 continue; 1253 1254 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); 1255 if (error) { 1256 pr_err("Failed to offline CPU%d - error=%d", 1257 cpu, error); 1258 break; 1259 } 1260 } 1261 1262 /* 1263 * Ensure all but the reboot CPU are offline. 1264 */ 1265 BUG_ON(num_online_cpus() > 1); 1266 1267 /* 1268 * Make sure the CPUs won't be enabled by someone else after this 1269 * point. Kexec will reboot to a new kernel shortly resetting 1270 * everything along the way. 1271 */ 1272 cpu_hotplug_disabled++; 1273 1274 cpu_maps_update_done(); 1275 } 1276 1277 #else 1278 #define takedown_cpu NULL 1279 #endif /*CONFIG_HOTPLUG_CPU*/ 1280 1281 /** 1282 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU 1283 * @cpu: cpu that just started 1284 * 1285 * It must be called by the arch code on the new cpu, before the new cpu 1286 * enables interrupts and before the "boot" cpu returns from __cpu_up(). 1287 */ 1288 void notify_cpu_starting(unsigned int cpu) 1289 { 1290 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1291 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); 1292 int ret; 1293 1294 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ 1295 cpumask_set_cpu(cpu, &cpus_booted_once_mask); 1296 ret = cpuhp_invoke_callback_range(true, cpu, st, target); 1297 1298 /* 1299 * STARTING must not fail! 1300 */ 1301 WARN_ON_ONCE(ret); 1302 } 1303 1304 /* 1305 * Called from the idle task. Wake up the controlling task which brings the 1306 * hotplug thread of the upcoming CPU up and then delegates the rest of the 1307 * online bringup to the hotplug thread. 
1308 */ 1309 void cpuhp_online_idle(enum cpuhp_state state) 1310 { 1311 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 1312 1313 /* Happens for the boot cpu */ 1314 if (state != CPUHP_AP_ONLINE_IDLE) 1315 return; 1316 1317 /* 1318 * Unpart the stopper thread before we start the idle loop (and start 1319 * scheduling); this ensures the stopper task is always available. 1320 */ 1321 stop_machine_unpark(smp_processor_id()); 1322 1323 st->state = CPUHP_AP_ONLINE_IDLE; 1324 complete_ap_thread(st, true); 1325 } 1326 1327 /* Requires cpu_add_remove_lock to be held */ 1328 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) 1329 { 1330 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1331 struct task_struct *idle; 1332 int ret = 0; 1333 1334 cpus_write_lock(); 1335 1336 if (!cpu_present(cpu)) { 1337 ret = -EINVAL; 1338 goto out; 1339 } 1340 1341 /* 1342 * The caller of cpu_up() might have raced with another 1343 * caller. Nothing to do. 1344 */ 1345 if (st->state >= target) 1346 goto out; 1347 1348 if (st->state == CPUHP_OFFLINE) { 1349 /* Let it fail before we try to bring the cpu up */ 1350 idle = idle_thread_get(cpu); 1351 if (IS_ERR(idle)) { 1352 ret = PTR_ERR(idle); 1353 goto out; 1354 } 1355 } 1356 1357 cpuhp_tasks_frozen = tasks_frozen; 1358 1359 cpuhp_set_state(cpu, st, target); 1360 /* 1361 * If the current CPU state is in the range of the AP hotplug thread, 1362 * then we need to kick the thread once more. 1363 */ 1364 if (st->state > CPUHP_BRINGUP_CPU) { 1365 ret = cpuhp_kick_ap_work(cpu); 1366 /* 1367 * The AP side has done the error rollback already. Just 1368 * return the error code.. 1369 */ 1370 if (ret) 1371 goto out; 1372 } 1373 1374 /* 1375 * Try to reach the target state. We max out on the BP at 1376 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is 1377 * responsible for bringing it up to the target state. 1378 */ 1379 target = min((int)target, CPUHP_BRINGUP_CPU); 1380 ret = cpuhp_up_callbacks(cpu, st, target); 1381 out: 1382 cpus_write_unlock(); 1383 arch_smt_update(); 1384 cpu_up_down_serialize_trainwrecks(tasks_frozen); 1385 return ret; 1386 } 1387 1388 static int cpu_up(unsigned int cpu, enum cpuhp_state target) 1389 { 1390 int err = 0; 1391 1392 if (!cpu_possible(cpu)) { 1393 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", 1394 cpu); 1395 #if defined(CONFIG_IA64) 1396 pr_err("please check additional_cpus= boot parameter\n"); 1397 #endif 1398 return -EINVAL; 1399 } 1400 1401 err = try_online_node(cpu_to_node(cpu)); 1402 if (err) 1403 return err; 1404 1405 cpu_maps_update_begin(); 1406 1407 if (cpu_hotplug_disabled) { 1408 err = -EBUSY; 1409 goto out; 1410 } 1411 if (!cpu_smt_allowed(cpu)) { 1412 err = -EPERM; 1413 goto out; 1414 } 1415 1416 err = _cpu_up(cpu, 0, target); 1417 out: 1418 cpu_maps_update_done(); 1419 return err; 1420 } 1421 1422 /** 1423 * cpu_device_up - Bring up a cpu device 1424 * @dev: Pointer to the cpu device to online 1425 * 1426 * This function is meant to be used by device core cpu subsystem only. 1427 * 1428 * Other subsystems should use add_cpu() instead. 
1429 * 1430 * Return: %0 on success or a negative errno code 1431 */ 1432 int cpu_device_up(struct device *dev) 1433 { 1434 return cpu_up(dev->id, CPUHP_ONLINE); 1435 } 1436 1437 int add_cpu(unsigned int cpu) 1438 { 1439 int ret; 1440 1441 lock_device_hotplug(); 1442 ret = device_online(get_cpu_device(cpu)); 1443 unlock_device_hotplug(); 1444 1445 return ret; 1446 } 1447 EXPORT_SYMBOL_GPL(add_cpu); 1448 1449 /** 1450 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on 1451 * @sleep_cpu: The cpu we hibernated on and should be brought up. 1452 * 1453 * On some architectures like arm64, we can hibernate on any CPU, but on 1454 * wake up the CPU we hibernated on might be offline as a side effect of 1455 * using maxcpus= for example. 1456 * 1457 * Return: %0 on success or a negative errno code 1458 */ 1459 int bringup_hibernate_cpu(unsigned int sleep_cpu) 1460 { 1461 int ret; 1462 1463 if (!cpu_online(sleep_cpu)) { 1464 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n"); 1465 ret = cpu_up(sleep_cpu, CPUHP_ONLINE); 1466 if (ret) { 1467 pr_err("Failed to bring hibernate-CPU up!\n"); 1468 return ret; 1469 } 1470 } 1471 return 0; 1472 } 1473 1474 void bringup_nonboot_cpus(unsigned int setup_max_cpus) 1475 { 1476 unsigned int cpu; 1477 1478 for_each_present_cpu(cpu) { 1479 if (num_online_cpus() >= setup_max_cpus) 1480 break; 1481 if (!cpu_online(cpu)) 1482 cpu_up(cpu, CPUHP_ONLINE); 1483 } 1484 } 1485 1486 #ifdef CONFIG_PM_SLEEP_SMP 1487 static cpumask_var_t frozen_cpus; 1488 1489 int freeze_secondary_cpus(int primary) 1490 { 1491 int cpu, error = 0; 1492 1493 cpu_maps_update_begin(); 1494 if (primary == -1) { 1495 primary = cpumask_first(cpu_online_mask); 1496 if (!housekeeping_cpu(primary, HK_TYPE_TIMER)) 1497 primary = housekeeping_any_cpu(HK_TYPE_TIMER); 1498 } else { 1499 if (!cpu_online(primary)) 1500 primary = cpumask_first(cpu_online_mask); 1501 } 1502 1503 /* 1504 * We take down all of the non-boot CPUs in one shot to avoid races 1505 * with the userspace trying to use the CPU hotplug at the same time 1506 */ 1507 cpumask_clear(frozen_cpus); 1508 1509 pr_info("Disabling non-boot CPUs ...\n"); 1510 for_each_online_cpu(cpu) { 1511 if (cpu == primary) 1512 continue; 1513 1514 if (pm_wakeup_pending()) { 1515 pr_info("Wakeup pending. Abort CPU freeze\n"); 1516 error = -EBUSY; 1517 break; 1518 } 1519 1520 trace_suspend_resume(TPS("CPU_OFF"), cpu, true); 1521 error = _cpu_down(cpu, 1, CPUHP_OFFLINE); 1522 trace_suspend_resume(TPS("CPU_OFF"), cpu, false); 1523 if (!error) 1524 cpumask_set_cpu(cpu, frozen_cpus); 1525 else { 1526 pr_err("Error taking CPU%d down: %d\n", cpu, error); 1527 break; 1528 } 1529 } 1530 1531 if (!error) 1532 BUG_ON(num_online_cpus() > 1); 1533 else 1534 pr_err("Non-boot CPUs are not disabled\n"); 1535 1536 /* 1537 * Make sure the CPUs won't be enabled by someone else. We need to do 1538 * this even in case of failure as all freeze_secondary_cpus() users are 1539 * supposed to do thaw_secondary_cpus() on the failure path. 
1540 */ 1541 cpu_hotplug_disabled++; 1542 1543 cpu_maps_update_done(); 1544 return error; 1545 } 1546 1547 void __weak arch_thaw_secondary_cpus_begin(void) 1548 { 1549 } 1550 1551 void __weak arch_thaw_secondary_cpus_end(void) 1552 { 1553 } 1554 1555 void thaw_secondary_cpus(void) 1556 { 1557 int cpu, error; 1558 1559 /* Allow everyone to use the CPU hotplug again */ 1560 cpu_maps_update_begin(); 1561 __cpu_hotplug_enable(); 1562 if (cpumask_empty(frozen_cpus)) 1563 goto out; 1564 1565 pr_info("Enabling non-boot CPUs ...\n"); 1566 1567 arch_thaw_secondary_cpus_begin(); 1568 1569 for_each_cpu(cpu, frozen_cpus) { 1570 trace_suspend_resume(TPS("CPU_ON"), cpu, true); 1571 error = _cpu_up(cpu, 1, CPUHP_ONLINE); 1572 trace_suspend_resume(TPS("CPU_ON"), cpu, false); 1573 if (!error) { 1574 pr_info("CPU%d is up\n", cpu); 1575 continue; 1576 } 1577 pr_warn("Error taking CPU%d up: %d\n", cpu, error); 1578 } 1579 1580 arch_thaw_secondary_cpus_end(); 1581 1582 cpumask_clear(frozen_cpus); 1583 out: 1584 cpu_maps_update_done(); 1585 } 1586 1587 static int __init alloc_frozen_cpus(void) 1588 { 1589 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) 1590 return -ENOMEM; 1591 return 0; 1592 } 1593 core_initcall(alloc_frozen_cpus); 1594 1595 /* 1596 * When callbacks for CPU hotplug notifications are being executed, we must 1597 * ensure that the state of the system with respect to the tasks being frozen 1598 * or not, as reported by the notification, remains unchanged *throughout the 1599 * duration* of the execution of the callbacks. 1600 * Hence we need to prevent the freezer from racing with regular CPU hotplug. 1601 * 1602 * This synchronization is implemented by mutually excluding regular CPU 1603 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/ 1604 * Hibernate notifications. 1605 */ 1606 static int 1607 cpu_hotplug_pm_callback(struct notifier_block *nb, 1608 unsigned long action, void *ptr) 1609 { 1610 switch (action) { 1611 1612 case PM_SUSPEND_PREPARE: 1613 case PM_HIBERNATION_PREPARE: 1614 cpu_hotplug_disable(); 1615 break; 1616 1617 case PM_POST_SUSPEND: 1618 case PM_POST_HIBERNATION: 1619 cpu_hotplug_enable(); 1620 break; 1621 1622 default: 1623 return NOTIFY_DONE; 1624 } 1625 1626 return NOTIFY_OK; 1627 } 1628 1629 1630 static int __init cpu_hotplug_pm_sync_init(void) 1631 { 1632 /* 1633 * cpu_hotplug_pm_callback has higher priority than x86 1634 * bsp_pm_callback which depends on cpu_hotplug_pm_callback 1635 * to disable cpu hotplug to avoid cpu hotplug race. 
1636 */ 1637 pm_notifier(cpu_hotplug_pm_callback, 0); 1638 return 0; 1639 } 1640 core_initcall(cpu_hotplug_pm_sync_init); 1641 1642 #endif /* CONFIG_PM_SLEEP_SMP */ 1643 1644 int __boot_cpu_id; 1645 1646 #endif /* CONFIG_SMP */ 1647 1648 /* Boot processor state steps */ 1649 static struct cpuhp_step cpuhp_hp_states[] = { 1650 [CPUHP_OFFLINE] = { 1651 .name = "offline", 1652 .startup.single = NULL, 1653 .teardown.single = NULL, 1654 }, 1655 #ifdef CONFIG_SMP 1656 [CPUHP_CREATE_THREADS]= { 1657 .name = "threads:prepare", 1658 .startup.single = smpboot_create_threads, 1659 .teardown.single = NULL, 1660 .cant_stop = true, 1661 }, 1662 [CPUHP_PERF_PREPARE] = { 1663 .name = "perf:prepare", 1664 .startup.single = perf_event_init_cpu, 1665 .teardown.single = perf_event_exit_cpu, 1666 }, 1667 [CPUHP_RANDOM_PREPARE] = { 1668 .name = "random:prepare", 1669 .startup.single = random_prepare_cpu, 1670 .teardown.single = NULL, 1671 }, 1672 [CPUHP_WORKQUEUE_PREP] = { 1673 .name = "workqueue:prepare", 1674 .startup.single = workqueue_prepare_cpu, 1675 .teardown.single = NULL, 1676 }, 1677 [CPUHP_HRTIMERS_PREPARE] = { 1678 .name = "hrtimers:prepare", 1679 .startup.single = hrtimers_prepare_cpu, 1680 .teardown.single = hrtimers_dead_cpu, 1681 }, 1682 [CPUHP_SMPCFD_PREPARE] = { 1683 .name = "smpcfd:prepare", 1684 .startup.single = smpcfd_prepare_cpu, 1685 .teardown.single = smpcfd_dead_cpu, 1686 }, 1687 [CPUHP_RELAY_PREPARE] = { 1688 .name = "relay:prepare", 1689 .startup.single = relay_prepare_cpu, 1690 .teardown.single = NULL, 1691 }, 1692 [CPUHP_SLAB_PREPARE] = { 1693 .name = "slab:prepare", 1694 .startup.single = slab_prepare_cpu, 1695 .teardown.single = slab_dead_cpu, 1696 }, 1697 [CPUHP_RCUTREE_PREP] = { 1698 .name = "RCU/tree:prepare", 1699 .startup.single = rcutree_prepare_cpu, 1700 .teardown.single = rcutree_dead_cpu, 1701 }, 1702 /* 1703 * On the tear-down path, timers_dead_cpu() must be invoked 1704 * before blk_mq_queue_reinit_notify() from notify_dead(), 1705 * otherwise a RCU stall occurs. 1706 */ 1707 [CPUHP_TIMERS_PREPARE] = { 1708 .name = "timers:prepare", 1709 .startup.single = timers_prepare_cpu, 1710 .teardown.single = timers_dead_cpu, 1711 }, 1712 /* Kicks the plugged cpu into life */ 1713 [CPUHP_BRINGUP_CPU] = { 1714 .name = "cpu:bringup", 1715 .startup.single = bringup_cpu, 1716 .teardown.single = finish_cpu, 1717 .cant_stop = true, 1718 }, 1719 /* Final state before CPU kills itself */ 1720 [CPUHP_AP_IDLE_DEAD] = { 1721 .name = "idle:dead", 1722 }, 1723 /* 1724 * Last state before CPU enters the idle loop to die. Transient state 1725 * for synchronization. 1726 */ 1727 [CPUHP_AP_OFFLINE] = { 1728 .name = "ap:offline", 1729 .cant_stop = true, 1730 }, 1731 /* First state is scheduler control. Interrupts are disabled */ 1732 [CPUHP_AP_SCHED_STARTING] = { 1733 .name = "sched:starting", 1734 .startup.single = sched_cpu_starting, 1735 .teardown.single = sched_cpu_dying, 1736 }, 1737 [CPUHP_AP_RCUTREE_DYING] = { 1738 .name = "RCU/tree:dying", 1739 .startup.single = NULL, 1740 .teardown.single = rcutree_dying_cpu, 1741 }, 1742 [CPUHP_AP_SMPCFD_DYING] = { 1743 .name = "smpcfd:dying", 1744 .startup.single = NULL, 1745 .teardown.single = smpcfd_dying_cpu, 1746 }, 1747 /* Entry state on starting. Interrupts enabled from here on. Transient 1748 * state for synchronsization */ 1749 [CPUHP_AP_ONLINE] = { 1750 .name = "ap:online", 1751 }, 1752 /* 1753 * Handled on control processor until the plugged processor manages 1754 * this itself. 
1755 */ 1756 [CPUHP_TEARDOWN_CPU] = { 1757 .name = "cpu:teardown", 1758 .startup.single = NULL, 1759 .teardown.single = takedown_cpu, 1760 .cant_stop = true, 1761 }, 1762 1763 [CPUHP_AP_SCHED_WAIT_EMPTY] = { 1764 .name = "sched:waitempty", 1765 .startup.single = NULL, 1766 .teardown.single = sched_cpu_wait_empty, 1767 }, 1768 1769 /* Handle smpboot threads park/unpark */ 1770 [CPUHP_AP_SMPBOOT_THREADS] = { 1771 .name = "smpboot/threads:online", 1772 .startup.single = smpboot_unpark_threads, 1773 .teardown.single = smpboot_park_threads, 1774 }, 1775 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = { 1776 .name = "irq/affinity:online", 1777 .startup.single = irq_affinity_online_cpu, 1778 .teardown.single = NULL, 1779 }, 1780 [CPUHP_AP_PERF_ONLINE] = { 1781 .name = "perf:online", 1782 .startup.single = perf_event_init_cpu, 1783 .teardown.single = perf_event_exit_cpu, 1784 }, 1785 [CPUHP_AP_WATCHDOG_ONLINE] = { 1786 .name = "lockup_detector:online", 1787 .startup.single = lockup_detector_online_cpu, 1788 .teardown.single = lockup_detector_offline_cpu, 1789 }, 1790 [CPUHP_AP_WORKQUEUE_ONLINE] = { 1791 .name = "workqueue:online", 1792 .startup.single = workqueue_online_cpu, 1793 .teardown.single = workqueue_offline_cpu, 1794 }, 1795 [CPUHP_AP_RANDOM_ONLINE] = { 1796 .name = "random:online", 1797 .startup.single = random_online_cpu, 1798 .teardown.single = NULL, 1799 }, 1800 [CPUHP_AP_RCUTREE_ONLINE] = { 1801 .name = "RCU/tree:online", 1802 .startup.single = rcutree_online_cpu, 1803 .teardown.single = rcutree_offline_cpu, 1804 }, 1805 #endif 1806 /* 1807 * The dynamically registered state space is here 1808 */ 1809 1810 #ifdef CONFIG_SMP 1811 /* Last state is scheduler control setting the cpu active */ 1812 [CPUHP_AP_ACTIVE] = { 1813 .name = "sched:active", 1814 .startup.single = sched_cpu_activate, 1815 .teardown.single = sched_cpu_deactivate, 1816 }, 1817 #endif 1818 1819 /* CPU is fully up and running. */ 1820 [CPUHP_ONLINE] = { 1821 .name = "online", 1822 .startup.single = NULL, 1823 .teardown.single = NULL, 1824 }, 1825 }; 1826 1827 /* Sanity check for callbacks */ 1828 static int cpuhp_cb_check(enum cpuhp_state state) 1829 { 1830 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE) 1831 return -EINVAL; 1832 return 0; 1833 } 1834 1835 /* 1836 * Returns a free for dynamic slot assignment of the Online state. The states 1837 * are protected by the cpuhp_slot_states mutex and an empty slot is identified 1838 * by having no name assigned. 1839 */ 1840 static int cpuhp_reserve_state(enum cpuhp_state state) 1841 { 1842 enum cpuhp_state i, end; 1843 struct cpuhp_step *step; 1844 1845 switch (state) { 1846 case CPUHP_AP_ONLINE_DYN: 1847 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN; 1848 end = CPUHP_AP_ONLINE_DYN_END; 1849 break; 1850 case CPUHP_BP_PREPARE_DYN: 1851 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN; 1852 end = CPUHP_BP_PREPARE_DYN_END; 1853 break; 1854 default: 1855 return -EINVAL; 1856 } 1857 1858 for (i = state; i <= end; i++, step++) { 1859 if (!step->name) 1860 return i; 1861 } 1862 WARN(1, "No more dynamic states available for CPU hotplug\n"); 1863 return -ENOSPC; 1864 } 1865 1866 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, 1867 int (*startup)(unsigned int cpu), 1868 int (*teardown)(unsigned int cpu), 1869 bool multi_instance) 1870 { 1871 /* (Un)Install the callbacks for further cpu hotplug operations */ 1872 struct cpuhp_step *sp; 1873 int ret = 0; 1874 1875 /* 1876 * If name is NULL, then the state gets removed. 
1877 * 1878 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on 1879 * the first allocation from these dynamic ranges, so the removal 1880 * would trigger a new allocation and clear the wrong (already 1881 * empty) state, leaving the callbacks of the to be cleared state 1882 * dangling, which causes wreckage on the next hotplug operation. 1883 */ 1884 if (name && (state == CPUHP_AP_ONLINE_DYN || 1885 state == CPUHP_BP_PREPARE_DYN)) { 1886 ret = cpuhp_reserve_state(state); 1887 if (ret < 0) 1888 return ret; 1889 state = ret; 1890 } 1891 sp = cpuhp_get_step(state); 1892 if (name && sp->name) 1893 return -EBUSY; 1894 1895 sp->startup.single = startup; 1896 sp->teardown.single = teardown; 1897 sp->name = name; 1898 sp->multi_instance = multi_instance; 1899 INIT_HLIST_HEAD(&sp->list); 1900 return ret; 1901 } 1902 1903 static void *cpuhp_get_teardown_cb(enum cpuhp_state state) 1904 { 1905 return cpuhp_get_step(state)->teardown.single; 1906 } 1907 1908 /* 1909 * Call the startup/teardown function for a step either on the AP or 1910 * on the current CPU. 1911 */ 1912 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, 1913 struct hlist_node *node) 1914 { 1915 struct cpuhp_step *sp = cpuhp_get_step(state); 1916 int ret; 1917 1918 /* 1919 * If there's nothing to do, we done. 1920 * Relies on the union for multi_instance. 1921 */ 1922 if (cpuhp_step_empty(bringup, sp)) 1923 return 0; 1924 /* 1925 * The non AP bound callbacks can fail on bringup. On teardown 1926 * e.g. module removal we crash for now. 1927 */ 1928 #ifdef CONFIG_SMP 1929 if (cpuhp_is_ap_state(state)) 1930 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); 1931 else 1932 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); 1933 #else 1934 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); 1935 #endif 1936 BUG_ON(ret && !bringup); 1937 return ret; 1938 } 1939 1940 /* 1941 * Called from __cpuhp_setup_state on a recoverable failure. 1942 * 1943 * Note: The teardown callbacks for rollback are not allowed to fail! 1944 */ 1945 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, 1946 struct hlist_node *node) 1947 { 1948 int cpu; 1949 1950 /* Roll back the already executed steps on the other cpus */ 1951 for_each_present_cpu(cpu) { 1952 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1953 int cpustate = st->state; 1954 1955 if (cpu >= failedcpu) 1956 break; 1957 1958 /* Did we invoke the startup call on that cpu ? */ 1959 if (cpustate >= state) 1960 cpuhp_issue_call(cpu, state, false, node); 1961 } 1962 } 1963 1964 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, 1965 struct hlist_node *node, 1966 bool invoke) 1967 { 1968 struct cpuhp_step *sp; 1969 int cpu; 1970 int ret; 1971 1972 lockdep_assert_cpus_held(); 1973 1974 sp = cpuhp_get_step(state); 1975 if (sp->multi_instance == false) 1976 return -EINVAL; 1977 1978 mutex_lock(&cpuhp_state_mutex); 1979 1980 if (!invoke || !sp->startup.multi) 1981 goto add_node; 1982 1983 /* 1984 * Try to call the startup callback for each present cpu 1985 * depending on the hotplug state of the cpu. 
1986 */ 1987 for_each_present_cpu(cpu) { 1988 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1989 int cpustate = st->state; 1990 1991 if (cpustate < state) 1992 continue; 1993 1994 ret = cpuhp_issue_call(cpu, state, true, node); 1995 if (ret) { 1996 if (sp->teardown.multi) 1997 cpuhp_rollback_install(cpu, state, node); 1998 goto unlock; 1999 } 2000 } 2001 add_node: 2002 ret = 0; 2003 hlist_add_head(node, &sp->list); 2004 unlock: 2005 mutex_unlock(&cpuhp_state_mutex); 2006 return ret; 2007 } 2008 2009 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, 2010 bool invoke) 2011 { 2012 int ret; 2013 2014 cpus_read_lock(); 2015 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke); 2016 cpus_read_unlock(); 2017 return ret; 2018 } 2019 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance); 2020 2021 /** 2022 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state 2023 * @state: The state to setup 2024 * @name: Name of the step 2025 * @invoke: If true, the startup function is invoked for cpus where 2026 * cpu state >= @state 2027 * @startup: startup callback function 2028 * @teardown: teardown callback function 2029 * @multi_instance: State is set up for multiple instances which get 2030 * added afterwards. 2031 * 2032 * The caller needs to hold cpus read locked while calling this function. 2033 * Return: 2034 * On success: 2035 * Positive state number if @state is CPUHP_AP_ONLINE_DYN; 2036 * 0 for all other states 2037 * On failure: proper (negative) error code 2038 */ 2039 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, 2040 const char *name, bool invoke, 2041 int (*startup)(unsigned int cpu), 2042 int (*teardown)(unsigned int cpu), 2043 bool multi_instance) 2044 { 2045 int cpu, ret = 0; 2046 bool dynstate; 2047 2048 lockdep_assert_cpus_held(); 2049 2050 if (cpuhp_cb_check(state) || !name) 2051 return -EINVAL; 2052 2053 mutex_lock(&cpuhp_state_mutex); 2054 2055 ret = cpuhp_store_callbacks(state, name, startup, teardown, 2056 multi_instance); 2057 2058 dynstate = state == CPUHP_AP_ONLINE_DYN; 2059 if (ret > 0 && dynstate) { 2060 state = ret; 2061 ret = 0; 2062 } 2063 2064 if (ret || !invoke || !startup) 2065 goto out; 2066 2067 /* 2068 * Try to call the startup callback for each present cpu 2069 * depending on the hotplug state of the cpu. 2070 */ 2071 for_each_present_cpu(cpu) { 2072 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 2073 int cpustate = st->state; 2074 2075 if (cpustate < state) 2076 continue; 2077 2078 ret = cpuhp_issue_call(cpu, state, true, NULL); 2079 if (ret) { 2080 if (teardown) 2081 cpuhp_rollback_install(cpu, state, NULL); 2082 cpuhp_store_callbacks(state, NULL, NULL, NULL, false); 2083 goto out; 2084 } 2085 } 2086 out: 2087 mutex_unlock(&cpuhp_state_mutex); 2088 /* 2089 * If the requested state is CPUHP_AP_ONLINE_DYN, return the 2090 * dynamically allocated state in case of success. 
2091 */ 2092 if (!ret && dynstate) 2093 return state; 2094 return ret; 2095 } 2096 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked); 2097 2098 int __cpuhp_setup_state(enum cpuhp_state state, 2099 const char *name, bool invoke, 2100 int (*startup)(unsigned int cpu), 2101 int (*teardown)(unsigned int cpu), 2102 bool multi_instance) 2103 { 2104 int ret; 2105 2106 cpus_read_lock(); 2107 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup, 2108 teardown, multi_instance); 2109 cpus_read_unlock(); 2110 return ret; 2111 } 2112 EXPORT_SYMBOL(__cpuhp_setup_state); 2113 2114 int __cpuhp_state_remove_instance(enum cpuhp_state state, 2115 struct hlist_node *node, bool invoke) 2116 { 2117 struct cpuhp_step *sp = cpuhp_get_step(state); 2118 int cpu; 2119 2120 BUG_ON(cpuhp_cb_check(state)); 2121 2122 if (!sp->multi_instance) 2123 return -EINVAL; 2124 2125 cpus_read_lock(); 2126 mutex_lock(&cpuhp_state_mutex); 2127 2128 if (!invoke || !cpuhp_get_teardown_cb(state)) 2129 goto remove; 2130 /* 2131 * Call the teardown callback for each present cpu depending 2132 * on the hotplug state of the cpu. This function is not 2133 * allowed to fail currently! 2134 */ 2135 for_each_present_cpu(cpu) { 2136 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 2137 int cpustate = st->state; 2138 2139 if (cpustate >= state) 2140 cpuhp_issue_call(cpu, state, false, node); 2141 } 2142 2143 remove: 2144 hlist_del(node); 2145 mutex_unlock(&cpuhp_state_mutex); 2146 cpus_read_unlock(); 2147 2148 return 0; 2149 } 2150 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); 2151 2152 /** 2153 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state 2154 * @state: The state to remove 2155 * @invoke: If true, the teardown function is invoked for cpus where 2156 * cpu state >= @state 2157 * 2158 * The caller needs to hold cpus read locked while calling this function. 2159 * The teardown callback is currently not allowed to fail. Think 2160 * about module removal! 2161 */ 2162 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke) 2163 { 2164 struct cpuhp_step *sp = cpuhp_get_step(state); 2165 int cpu; 2166 2167 BUG_ON(cpuhp_cb_check(state)); 2168 2169 lockdep_assert_cpus_held(); 2170 2171 mutex_lock(&cpuhp_state_mutex); 2172 if (sp->multi_instance) { 2173 WARN(!hlist_empty(&sp->list), 2174 "Error: Removing state %d which has instances left.\n", 2175 state); 2176 goto remove; 2177 } 2178 2179 if (!invoke || !cpuhp_get_teardown_cb(state)) 2180 goto remove; 2181 2182 /* 2183 * Call the teardown callback for each present cpu depending 2184 * on the hotplug state of the cpu. This function is not 2185 * allowed to fail currently!
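 * There is no rollback path here: the callbacks are about to be
 * removed, typically because the owning module is unloading, so a
 * failing teardown could not be undone.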
2186 */ 2187 for_each_present_cpu(cpu) { 2188 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 2189 int cpustate = st->state; 2190 2191 if (cpustate >= state) 2192 cpuhp_issue_call(cpu, state, false, NULL); 2193 } 2194 remove: 2195 cpuhp_store_callbacks(state, NULL, NULL, NULL, false); 2196 mutex_unlock(&cpuhp_state_mutex); 2197 } 2198 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked); 2199 2200 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) 2201 { 2202 cpus_read_lock(); 2203 __cpuhp_remove_state_cpuslocked(state, invoke); 2204 cpus_read_unlock(); 2205 } 2206 EXPORT_SYMBOL(__cpuhp_remove_state); 2207 2208 #ifdef CONFIG_HOTPLUG_SMT 2209 static void cpuhp_offline_cpu_device(unsigned int cpu) 2210 { 2211 struct device *dev = get_cpu_device(cpu); 2212 2213 dev->offline = true; 2214 /* Tell user space about the state change */ 2215 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 2216 } 2217 2218 static void cpuhp_online_cpu_device(unsigned int cpu) 2219 { 2220 struct device *dev = get_cpu_device(cpu); 2221 2222 dev->offline = false; 2223 /* Tell user space about the state change */ 2224 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2225 } 2226 2227 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) 2228 { 2229 int cpu, ret = 0; 2230 2231 cpu_maps_update_begin(); 2232 for_each_online_cpu(cpu) { 2233 if (topology_is_primary_thread(cpu)) 2234 continue; 2235 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); 2236 if (ret) 2237 break; 2238 /* 2239 * As this needs to hold the cpu maps lock it's impossible 2240 * to call device_offline() because that ends up calling 2241 * cpu_down() which takes cpu maps lock. cpu maps lock 2242 * needs to be held as this might race against in kernel 2243 * abusers of the hotplug machinery (thermal management). 2244 * 2245 * So nothing would update device:offline state. That would 2246 * leave the sysfs entry stale and prevent onlining after 2247 * smt control has been changed to 'off' again. This is 2248 * called under the sysfs hotplug lock, so it is properly 2249 * serialized against the regular offline usage. 
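 *
 * In short: this path (reached via "off"/"forceoff" writes to
 * /sys/devices/system/cpu/smt/control) takes the secondary siblings
 * down with cpu_down_maps_locked() and then mirrors the result into
 * the device state by hand.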
2250 */ 2251 cpuhp_offline_cpu_device(cpu); 2252 } 2253 if (!ret) 2254 cpu_smt_control = ctrlval; 2255 cpu_maps_update_done(); 2256 return ret; 2257 } 2258 2259 int cpuhp_smt_enable(void) 2260 { 2261 int cpu, ret = 0; 2262 2263 cpu_maps_update_begin(); 2264 cpu_smt_control = CPU_SMT_ENABLED; 2265 for_each_present_cpu(cpu) { 2266 /* Skip online CPUs and CPUs on offline nodes */ 2267 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) 2268 continue; 2269 ret = _cpu_up(cpu, 0, CPUHP_ONLINE); 2270 if (ret) 2271 break; 2272 /* See comment in cpuhp_smt_disable() */ 2273 cpuhp_online_cpu_device(cpu); 2274 } 2275 cpu_maps_update_done(); 2276 return ret; 2277 } 2278 #endif 2279 2280 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU) 2281 static ssize_t state_show(struct device *dev, 2282 struct device_attribute *attr, char *buf) 2283 { 2284 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 2285 2286 return sprintf(buf, "%d\n", st->state); 2287 } 2288 static DEVICE_ATTR_RO(state); 2289 2290 static ssize_t target_store(struct device *dev, struct device_attribute *attr, 2291 const char *buf, size_t count) 2292 { 2293 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 2294 struct cpuhp_step *sp; 2295 int target, ret; 2296 2297 ret = kstrtoint(buf, 10, &target); 2298 if (ret) 2299 return ret; 2300 2301 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL 2302 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE) 2303 return -EINVAL; 2304 #else 2305 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE) 2306 return -EINVAL; 2307 #endif 2308 2309 ret = lock_device_hotplug_sysfs(); 2310 if (ret) 2311 return ret; 2312 2313 mutex_lock(&cpuhp_state_mutex); 2314 sp = cpuhp_get_step(target); 2315 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; 2316 mutex_unlock(&cpuhp_state_mutex); 2317 if (ret) 2318 goto out; 2319 2320 if (st->state < target) 2321 ret = cpu_up(dev->id, target); 2322 else 2323 ret = cpu_down(dev->id, target); 2324 out: 2325 unlock_device_hotplug(); 2326 return ret ? ret : count; 2327 } 2328 2329 static ssize_t target_show(struct device *dev, 2330 struct device_attribute *attr, char *buf) 2331 { 2332 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 2333 2334 return sprintf(buf, "%d\n", st->target); 2335 } 2336 static DEVICE_ATTR_RW(target); 2337 2338 static ssize_t fail_store(struct device *dev, struct device_attribute *attr, 2339 const char *buf, size_t count) 2340 { 2341 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 2342 struct cpuhp_step *sp; 2343 int fail, ret; 2344 2345 ret = kstrtoint(buf, 10, &fail); 2346 if (ret) 2347 return ret; 2348 2349 if (fail == CPUHP_INVALID) { 2350 st->fail = fail; 2351 return count; 2352 } 2353 2354 if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE) 2355 return -EINVAL; 2356 2357 /* 2358 * Cannot fail STARTING/DYING callbacks. 2359 */ 2360 if (cpuhp_is_atomic_state(fail)) 2361 return -EINVAL; 2362 2363 /* 2364 * DEAD callbacks cannot fail... 2365 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter 2366 * triggering STARTING callbacks, a failure in this state would 2367 * hinder rollback. 2368 */ 2369 if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU) 2370 return -EINVAL; 2371 2372 /* 2373 * Cannot fail anything that doesn't have callbacks. 
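 *
 * For orientation, a hedged example of exercising this debug knob from
 * user space (N stands for some non-atomic state that has callbacks):
 *
 *	# echo N > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * The next hotplug operation on cpu1 that passes through state N then
 * fails with -EAGAIN and exercises the rollback machinery.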
2374 */ 2375 mutex_lock(&cpuhp_state_mutex); 2376 sp = cpuhp_get_step(fail); 2377 if (!sp->startup.single && !sp->teardown.single) 2378 ret = -EINVAL; 2379 mutex_unlock(&cpuhp_state_mutex); 2380 if (ret) 2381 return ret; 2382 2383 st->fail = fail; 2384 2385 return count; 2386 } 2387 2388 static ssize_t fail_show(struct device *dev, 2389 struct device_attribute *attr, char *buf) 2390 { 2391 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 2392 2393 return sprintf(buf, "%d\n", st->fail); 2394 } 2395 2396 static DEVICE_ATTR_RW(fail); 2397 2398 static struct attribute *cpuhp_cpu_attrs[] = { 2399 &dev_attr_state.attr, 2400 &dev_attr_target.attr, 2401 &dev_attr_fail.attr, 2402 NULL 2403 }; 2404 2405 static const struct attribute_group cpuhp_cpu_attr_group = { 2406 .attrs = cpuhp_cpu_attrs, 2407 .name = "hotplug", 2408 NULL 2409 }; 2410 2411 static ssize_t states_show(struct device *dev, 2412 struct device_attribute *attr, char *buf) 2413 { 2414 ssize_t cur, res = 0; 2415 int i; 2416 2417 mutex_lock(&cpuhp_state_mutex); 2418 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) { 2419 struct cpuhp_step *sp = cpuhp_get_step(i); 2420 2421 if (sp->name) { 2422 cur = sprintf(buf, "%3d: %s\n", i, sp->name); 2423 buf += cur; 2424 res += cur; 2425 } 2426 } 2427 mutex_unlock(&cpuhp_state_mutex); 2428 return res; 2429 } 2430 static DEVICE_ATTR_RO(states); 2431 2432 static struct attribute *cpuhp_cpu_root_attrs[] = { 2433 &dev_attr_states.attr, 2434 NULL 2435 }; 2436 2437 static const struct attribute_group cpuhp_cpu_root_attr_group = { 2438 .attrs = cpuhp_cpu_root_attrs, 2439 .name = "hotplug", 2440 NULL 2441 }; 2442 2443 #ifdef CONFIG_HOTPLUG_SMT 2444 2445 static ssize_t 2446 __store_smt_control(struct device *dev, struct device_attribute *attr, 2447 const char *buf, size_t count) 2448 { 2449 int ctrlval, ret; 2450 2451 if (sysfs_streq(buf, "on")) 2452 ctrlval = CPU_SMT_ENABLED; 2453 else if (sysfs_streq(buf, "off")) 2454 ctrlval = CPU_SMT_DISABLED; 2455 else if (sysfs_streq(buf, "forceoff")) 2456 ctrlval = CPU_SMT_FORCE_DISABLED; 2457 else 2458 return -EINVAL; 2459 2460 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED) 2461 return -EPERM; 2462 2463 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) 2464 return -ENODEV; 2465 2466 ret = lock_device_hotplug_sysfs(); 2467 if (ret) 2468 return ret; 2469 2470 if (ctrlval != cpu_smt_control) { 2471 switch (ctrlval) { 2472 case CPU_SMT_ENABLED: 2473 ret = cpuhp_smt_enable(); 2474 break; 2475 case CPU_SMT_DISABLED: 2476 case CPU_SMT_FORCE_DISABLED: 2477 ret = cpuhp_smt_disable(ctrlval); 2478 break; 2479 } 2480 } 2481 2482 unlock_device_hotplug(); 2483 return ret ? 
ret : count; 2484 } 2485 2486 #else /* !CONFIG_HOTPLUG_SMT */ 2487 static ssize_t 2488 __store_smt_control(struct device *dev, struct device_attribute *attr, 2489 const char *buf, size_t count) 2490 { 2491 return -ENODEV; 2492 } 2493 #endif /* CONFIG_HOTPLUG_SMT */ 2494 2495 static const char *smt_states[] = { 2496 [CPU_SMT_ENABLED] = "on", 2497 [CPU_SMT_DISABLED] = "off", 2498 [CPU_SMT_FORCE_DISABLED] = "forceoff", 2499 [CPU_SMT_NOT_SUPPORTED] = "notsupported", 2500 [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented", 2501 }; 2502 2503 static ssize_t control_show(struct device *dev, 2504 struct device_attribute *attr, char *buf) 2505 { 2506 const char *state = smt_states[cpu_smt_control]; 2507 2508 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state); 2509 } 2510 2511 static ssize_t control_store(struct device *dev, struct device_attribute *attr, 2512 const char *buf, size_t count) 2513 { 2514 return __store_smt_control(dev, attr, buf, count); 2515 } 2516 static DEVICE_ATTR_RW(control); 2517 2518 static ssize_t active_show(struct device *dev, 2519 struct device_attribute *attr, char *buf) 2520 { 2521 return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active()); 2522 } 2523 static DEVICE_ATTR_RO(active); 2524 2525 static struct attribute *cpuhp_smt_attrs[] = { 2526 &dev_attr_control.attr, 2527 &dev_attr_active.attr, 2528 NULL 2529 }; 2530 2531 static const struct attribute_group cpuhp_smt_attr_group = { 2532 .attrs = cpuhp_smt_attrs, 2533 .name = "smt", 2534 NULL 2535 }; 2536 2537 static int __init cpu_smt_sysfs_init(void) 2538 { 2539 return sysfs_create_group(&cpu_subsys.dev_root->kobj, 2540 &cpuhp_smt_attr_group); 2541 } 2542 2543 static int __init cpuhp_sysfs_init(void) 2544 { 2545 int cpu, ret; 2546 2547 ret = cpu_smt_sysfs_init(); 2548 if (ret) 2549 return ret; 2550 2551 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, 2552 &cpuhp_cpu_root_attr_group); 2553 if (ret) 2554 return ret; 2555 2556 for_each_possible_cpu(cpu) { 2557 struct device *dev = get_cpu_device(cpu); 2558 2559 if (!dev) 2560 continue; 2561 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group); 2562 if (ret) 2563 return ret; 2564 } 2565 return 0; 2566 } 2567 device_initcall(cpuhp_sysfs_init); 2568 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */ 2569 2570 /* 2571 * cpu_bit_bitmap[] is a special, "compressed" data structure that 2572 * represents all NR_CPUS bits binary values of 1<<nr. 2573 * 2574 * It is used by cpumask_of() to get a constant address to a CPU 2575 * mask value that has a single bit set only. 
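 *
 * Sketch of the indexing, roughly following get_cpu_mask() in
 * <linux/cpumask.h>: row (1 + cpu % BITS_PER_LONG) has bit
 * (cpu % BITS_PER_LONG) set in its first word, and the returned
 * pointer is moved back by cpu / BITS_PER_LONG longs so that the set
 * bit lands at overall bit position 'cpu'. E.g. on a 64-bit kernel
 * cpumask_of(70) uses row 7 (first word 1UL << 6), rewound by one
 * long, which is bit 70 of the resulting mask.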
2576 */ 2577 2578 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 2579 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) 2580 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 2581 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 2582 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 2583 2584 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 2585 2586 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 2587 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 2588 #if BITS_PER_LONG > 32 2589 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 2590 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 2591 #endif 2592 }; 2593 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 2594 2595 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 2596 EXPORT_SYMBOL(cpu_all_bits); 2597 2598 #ifdef CONFIG_INIT_ALL_POSSIBLE 2599 struct cpumask __cpu_possible_mask __read_mostly 2600 = {CPU_BITS_ALL}; 2601 #else 2602 struct cpumask __cpu_possible_mask __read_mostly; 2603 #endif 2604 EXPORT_SYMBOL(__cpu_possible_mask); 2605 2606 struct cpumask __cpu_online_mask __read_mostly; 2607 EXPORT_SYMBOL(__cpu_online_mask); 2608 2609 struct cpumask __cpu_present_mask __read_mostly; 2610 EXPORT_SYMBOL(__cpu_present_mask); 2611 2612 struct cpumask __cpu_active_mask __read_mostly; 2613 EXPORT_SYMBOL(__cpu_active_mask); 2614 2615 struct cpumask __cpu_dying_mask __read_mostly; 2616 EXPORT_SYMBOL(__cpu_dying_mask); 2617 2618 atomic_t __num_online_cpus __read_mostly; 2619 EXPORT_SYMBOL(__num_online_cpus); 2620 2621 void init_cpu_present(const struct cpumask *src) 2622 { 2623 cpumask_copy(&__cpu_present_mask, src); 2624 } 2625 2626 void init_cpu_possible(const struct cpumask *src) 2627 { 2628 cpumask_copy(&__cpu_possible_mask, src); 2629 } 2630 2631 void init_cpu_online(const struct cpumask *src) 2632 { 2633 cpumask_copy(&__cpu_online_mask, src); 2634 } 2635 2636 void set_cpu_online(unsigned int cpu, bool online) 2637 { 2638 /* 2639 * atomic_inc/dec() is required to handle the horrid abuse of this 2640 * function by the reboot and kexec code which invoke it from 2641 * IPI/NMI broadcasts when shutting down CPUs. Invocation from 2642 * regular CPU hotplug is properly serialized. 2643 * 2644 * Note, that the fact that __num_online_cpus is of type atomic_t 2645 * does not protect readers which are not serialized against 2646 * concurrent hotplug operations. 2647 */ 2648 if (online) { 2649 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask)) 2650 atomic_inc(&__num_online_cpus); 2651 } else { 2652 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask)) 2653 atomic_dec(&__num_online_cpus); 2654 } 2655 } 2656 2657 /* 2658 * Activate the first processor. 2659 */ 2660 void __init boot_cpu_init(void) 2661 { 2662 int cpu = smp_processor_id(); 2663 2664 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 2665 set_cpu_online(cpu, true); 2666 set_cpu_active(cpu, true); 2667 set_cpu_present(cpu, true); 2668 set_cpu_possible(cpu, true); 2669 2670 #ifdef CONFIG_SMP 2671 __boot_cpu_id = cpu; 2672 #endif 2673 } 2674 2675 /* 2676 * Must be called _AFTER_ setting up the per_cpu areas 2677 */ 2678 void __init boot_cpu_hotplug_init(void) 2679 { 2680 #ifdef CONFIG_SMP 2681 cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask); 2682 #endif 2683 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); 2684 } 2685 2686 /* 2687 * These are used for a global "mitigations=" cmdline option for toggling 2688 * optional CPU mitigations. 
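 *
 * mitigations_parse_cmdline() below accepts "off", "auto" (the
 * default) and "auto,nosmt"; anything else is logged as unsupported
 * and the default is kept. Arch code queries the result via
 * cpu_mitigations_off() and cpu_mitigations_auto_nosmt().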
2689 */ 2690 enum cpu_mitigations { 2691 CPU_MITIGATIONS_OFF, 2692 CPU_MITIGATIONS_AUTO, 2693 CPU_MITIGATIONS_AUTO_NOSMT, 2694 }; 2695 2696 static enum cpu_mitigations cpu_mitigations __ro_after_init = 2697 CPU_MITIGATIONS_AUTO; 2698 2699 static int __init mitigations_parse_cmdline(char *arg) 2700 { 2701 if (!strcmp(arg, "off")) 2702 cpu_mitigations = CPU_MITIGATIONS_OFF; 2703 else if (!strcmp(arg, "auto")) 2704 cpu_mitigations = CPU_MITIGATIONS_AUTO; 2705 else if (!strcmp(arg, "auto,nosmt")) 2706 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT; 2707 else 2708 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", 2709 arg); 2710 2711 return 0; 2712 } 2713 early_param("mitigations", mitigations_parse_cmdline); 2714 2715 /* mitigations=off */ 2716 bool cpu_mitigations_off(void) 2717 { 2718 return cpu_mitigations == CPU_MITIGATIONS_OFF; 2719 } 2720 EXPORT_SYMBOL_GPL(cpu_mitigations_off); 2721 2722 /* mitigations=auto,nosmt */ 2723 bool cpu_mitigations_auto_nosmt(void) 2724 { 2725 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; 2726 } 2727 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt); 2728
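
/*
 * Illustrative (hypothetical) consumer of the two helpers above; arch
 * mitigation-selection code typically keys off them roughly like this.
 * The foo_* names are made up for the sketch; cpu_smt_disable() is the
 * boot-time SMT switch provided by this subsystem.
 *
 *	static void __init foo_select_mitigation(void)
 *	{
 *		if (cpu_mitigations_off())
 *			return;
 *		foo_enable_mitigation();
 *		if (cpu_mitigations_auto_nosmt())
 *			cpu_smt_disable(false);
 *	}
 */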