1 /* CPU control. 2 * (C) 2001, 2002, 2003, 2004 Rusty Russell 3 * 4 * This code is licenced under the GPL. 5 */ 6 #include <linux/proc_fs.h> 7 #include <linux/smp.h> 8 #include <linux/init.h> 9 #include <linux/notifier.h> 10 #include <linux/sched/signal.h> 11 #include <linux/sched/hotplug.h> 12 #include <linux/sched/task.h> 13 #include <linux/unistd.h> 14 #include <linux/cpu.h> 15 #include <linux/oom.h> 16 #include <linux/rcupdate.h> 17 #include <linux/export.h> 18 #include <linux/bug.h> 19 #include <linux/kthread.h> 20 #include <linux/stop_machine.h> 21 #include <linux/mutex.h> 22 #include <linux/gfp.h> 23 #include <linux/suspend.h> 24 #include <linux/lockdep.h> 25 #include <linux/tick.h> 26 #include <linux/irq.h> 27 #include <linux/nmi.h> 28 #include <linux/smpboot.h> 29 #include <linux/relay.h> 30 #include <linux/slab.h> 31 #include <linux/percpu-rwsem.h> 32 33 #include <trace/events/power.h> 34 #define CREATE_TRACE_POINTS 35 #include <trace/events/cpuhp.h> 36 37 #include "smpboot.h" 38 39 /** 40 * cpuhp_cpu_state - Per cpu hotplug state storage 41 * @state: The current cpu state 42 * @target: The target state 43 * @thread: Pointer to the hotplug thread 44 * @should_run: Thread should execute 45 * @rollback: Perform a rollback 46 * @single: Single callback invocation 47 * @bringup: Single callback bringup or teardown selector 48 * @cb_state: The state for a single callback (install/uninstall) 49 * @result: Result of the operation 50 * @done_up: Signal completion to the issuer of the task for cpu-up 51 * @done_down: Signal completion to the issuer of the task for cpu-down 52 */ 53 struct cpuhp_cpu_state { 54 enum cpuhp_state state; 55 enum cpuhp_state target; 56 enum cpuhp_state fail; 57 #ifdef CONFIG_SMP 58 struct task_struct *thread; 59 bool should_run; 60 bool rollback; 61 bool single; 62 bool bringup; 63 bool booted_once; 64 struct hlist_node *node; 65 struct hlist_node *last; 66 enum cpuhp_state cb_state; 67 int result; 68 struct completion done_up; 69 struct completion done_down; 70 #endif 71 }; 72 73 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { 74 .fail = CPUHP_INVALID, 75 }; 76 77 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) 78 static struct lockdep_map cpuhp_state_up_map = 79 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); 80 static struct lockdep_map cpuhp_state_down_map = 81 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map); 82 83 84 static inline void cpuhp_lock_acquire(bool bringup) 85 { 86 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); 87 } 88 89 static inline void cpuhp_lock_release(bool bringup) 90 { 91 lock_map_release(bringup ? 
			 &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name: Name of the step
 * @startup: Startup function of the step
 * @teardown: Teardown function of the step
 * @cant_stop: Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu: The cpu for which the callback should be invoked
 * @state: The state to do callbacks for
 * @bringup: True if the bringup callback should be invoked
 * @node: For multi-instance, do a single entry callback for install/remove
 * @lastp: For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ?
step->startup.multi : step->teardown.multi; 208 if (!cbm) 209 return ret; 210 211 hlist_for_each(node, &step->list) { 212 if (!cnt--) 213 break; 214 215 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); 216 ret = cbm(cpu, node); 217 trace_cpuhp_exit(cpu, st->state, state, ret); 218 /* 219 * Rollback must not fail, 220 */ 221 WARN_ON_ONCE(ret); 222 } 223 return ret; 224 } 225 226 #ifdef CONFIG_SMP 227 static bool cpuhp_is_ap_state(enum cpuhp_state state) 228 { 229 /* 230 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation 231 * purposes as that state is handled explicitly in cpu_down. 232 */ 233 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU; 234 } 235 236 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup) 237 { 238 struct completion *done = bringup ? &st->done_up : &st->done_down; 239 wait_for_completion(done); 240 } 241 242 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup) 243 { 244 struct completion *done = bringup ? &st->done_up : &st->done_down; 245 complete(done); 246 } 247 248 /* 249 * The former STARTING/DYING states, ran with IRQs disabled and must not fail. 250 */ 251 static bool cpuhp_is_atomic_state(enum cpuhp_state state) 252 { 253 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; 254 } 255 256 /* Serializes the updates to cpu_online_mask, cpu_present_mask */ 257 static DEFINE_MUTEX(cpu_add_remove_lock); 258 bool cpuhp_tasks_frozen; 259 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen); 260 261 /* 262 * The following two APIs (cpu_maps_update_begin/done) must be used when 263 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. 264 */ 265 void cpu_maps_update_begin(void) 266 { 267 mutex_lock(&cpu_add_remove_lock); 268 } 269 270 void cpu_maps_update_done(void) 271 { 272 mutex_unlock(&cpu_add_remove_lock); 273 } 274 275 /* 276 * If set, cpu_up and cpu_down will return -EBUSY and do nothing. 277 * Should always be manipulated under cpu_add_remove_lock 278 */ 279 static int cpu_hotplug_disabled; 280 281 #ifdef CONFIG_HOTPLUG_CPU 282 283 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); 284 285 void cpus_read_lock(void) 286 { 287 percpu_down_read(&cpu_hotplug_lock); 288 } 289 EXPORT_SYMBOL_GPL(cpus_read_lock); 290 291 int cpus_read_trylock(void) 292 { 293 return percpu_down_read_trylock(&cpu_hotplug_lock); 294 } 295 EXPORT_SYMBOL_GPL(cpus_read_trylock); 296 297 void cpus_read_unlock(void) 298 { 299 percpu_up_read(&cpu_hotplug_lock); 300 } 301 EXPORT_SYMBOL_GPL(cpus_read_unlock); 302 303 void cpus_write_lock(void) 304 { 305 percpu_down_write(&cpu_hotplug_lock); 306 } 307 308 void cpus_write_unlock(void) 309 { 310 percpu_up_write(&cpu_hotplug_lock); 311 } 312 313 void lockdep_assert_cpus_held(void) 314 { 315 percpu_rwsem_assert_held(&cpu_hotplug_lock); 316 } 317 318 /* 319 * Wait for currently running CPU hotplug operations to complete (if any) and 320 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects 321 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the 322 * hotplug path before performing hotplug operations. So acquiring that lock 323 * guarantees mutual exclusion from any currently running hotplug operations. 
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);

static bool cpu_smt_available __read_mostly;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code before non boot CPUs
 * are brought up.
 */
void __init cpu_smt_check_topology_early(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

/*
 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
 * brought online. This ensures the smt/l1tf sysfs entries are consistent
 * with reality. cpu_smt_available is set to true during the bringup of non
 * boot CPUs when an SMT sibling is detected. Note, this may overwrite
 * cpu_smt_control's previous setting.
 */
void __init cpu_smt_check_topology(void)
{
	if (!cpu_smt_available)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * If the CPU is not a 'primary' thread and the booted_once bit is
	 * set then the processor has SMT support. Store this information
	 * for the late check of SMT support in cpu_smt_check_topology().
	 */
	if (per_cpu(cpuhp_state, cpu).booted_once)
		cpu_smt_available = true;

	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
421 */ 422 return !per_cpu(cpuhp_state, cpu).booted_once; 423 } 424 #else 425 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } 426 #endif 427 428 static inline enum cpuhp_state 429 cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target) 430 { 431 enum cpuhp_state prev_state = st->state; 432 433 st->rollback = false; 434 st->last = NULL; 435 436 st->target = target; 437 st->single = false; 438 st->bringup = st->state < target; 439 440 return prev_state; 441 } 442 443 static inline void 444 cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state) 445 { 446 st->rollback = true; 447 448 /* 449 * If we have st->last we need to undo partial multi_instance of this 450 * state first. Otherwise start undo at the previous state. 451 */ 452 if (!st->last) { 453 if (st->bringup) 454 st->state--; 455 else 456 st->state++; 457 } 458 459 st->target = prev_state; 460 st->bringup = !st->bringup; 461 } 462 463 /* Regular hotplug invocation of the AP hotplug thread */ 464 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st) 465 { 466 if (!st->single && st->state == st->target) 467 return; 468 469 st->result = 0; 470 /* 471 * Make sure the above stores are visible before should_run becomes 472 * true. Paired with the mb() above in cpuhp_thread_fun() 473 */ 474 smp_mb(); 475 st->should_run = true; 476 wake_up_process(st->thread); 477 wait_for_ap_thread(st, st->bringup); 478 } 479 480 static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target) 481 { 482 enum cpuhp_state prev_state; 483 int ret; 484 485 prev_state = cpuhp_set_state(st, target); 486 __cpuhp_kick_ap(st); 487 if ((ret = st->result)) { 488 cpuhp_reset_state(st, prev_state); 489 __cpuhp_kick_ap(st); 490 } 491 492 return ret; 493 } 494 495 static int bringup_wait_for_ap(unsigned int cpu) 496 { 497 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 498 499 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ 500 wait_for_ap_thread(st, true); 501 if (WARN_ON_ONCE((!cpu_online(cpu)))) 502 return -ECANCELED; 503 504 /* Unpark the stopper thread and the hotplug thread of the target cpu */ 505 stop_machine_unpark(cpu); 506 kthread_unpark(st->thread); 507 508 /* 509 * SMT soft disabling on X86 requires to bring the CPU out of the 510 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The 511 * CPU marked itself as booted_once in cpu_notify_starting() so the 512 * cpu_smt_allowed() check will now return false if this is not the 513 * primary sibling. 514 */ 515 if (!cpu_smt_allowed(cpu)) 516 return -ECANCELED; 517 518 if (st->target <= CPUHP_AP_ONLINE_IDLE) 519 return 0; 520 521 return cpuhp_kick_ap(st, st->target); 522 } 523 524 static int bringup_cpu(unsigned int cpu) 525 { 526 struct task_struct *idle = idle_thread_get(cpu); 527 int ret; 528 529 /* 530 * Some architectures have to walk the irq descriptors to 531 * setup the vector space for the cpu which comes online. 532 * Prevent irq alloc/free across the bringup. 533 */ 534 irq_lock_sparse(); 535 536 /* Arch-specific enabling code. 
 */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
658 */ 659 WARN_ON_ONCE(st->rollback); 660 st->should_run = false; 661 } 662 663 cpuhp_lock_release(bringup); 664 665 if (!st->should_run) 666 complete_ap_thread(st, bringup); 667 } 668 669 /* Invoke a single callback on a remote cpu */ 670 static int 671 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, 672 struct hlist_node *node) 673 { 674 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 675 int ret; 676 677 if (!cpu_online(cpu)) 678 return 0; 679 680 cpuhp_lock_acquire(false); 681 cpuhp_lock_release(false); 682 683 cpuhp_lock_acquire(true); 684 cpuhp_lock_release(true); 685 686 /* 687 * If we are up and running, use the hotplug thread. For early calls 688 * we invoke the thread function directly. 689 */ 690 if (!st->thread) 691 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); 692 693 st->rollback = false; 694 st->last = NULL; 695 696 st->node = node; 697 st->bringup = bringup; 698 st->cb_state = state; 699 st->single = true; 700 701 __cpuhp_kick_ap(st); 702 703 /* 704 * If we failed and did a partial, do a rollback. 705 */ 706 if ((ret = st->result) && st->last) { 707 st->rollback = true; 708 st->bringup = !bringup; 709 710 __cpuhp_kick_ap(st); 711 } 712 713 /* 714 * Clean up the leftovers so the next hotplug operation wont use stale 715 * data. 716 */ 717 st->node = st->last = NULL; 718 return ret; 719 } 720 721 static int cpuhp_kick_ap_work(unsigned int cpu) 722 { 723 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 724 enum cpuhp_state prev_state = st->state; 725 int ret; 726 727 cpuhp_lock_acquire(false); 728 cpuhp_lock_release(false); 729 730 cpuhp_lock_acquire(true); 731 cpuhp_lock_release(true); 732 733 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); 734 ret = cpuhp_kick_ap(st, st->target); 735 trace_cpuhp_exit(cpu, st->state, prev_state, ret); 736 737 return ret; 738 } 739 740 static struct smp_hotplug_thread cpuhp_threads = { 741 .store = &cpuhp_state.thread, 742 .create = &cpuhp_create, 743 .thread_should_run = cpuhp_should_run, 744 .thread_fn = cpuhp_thread_fun, 745 .thread_comm = "cpuhp/%u", 746 .selfparking = true, 747 }; 748 749 void __init cpuhp_threads_init(void) 750 { 751 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads)); 752 kthread_unpark(this_cpu_read(cpuhp_state.thread)); 753 } 754 755 #ifdef CONFIG_HOTPLUG_CPU 756 /** 757 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU 758 * @cpu: a CPU id 759 * 760 * This function walks all processes, finds a valid mm struct for each one and 761 * then clears a corresponding bit in mm's cpumask. While this all sounds 762 * trivial, there are various non-obvious corner cases, which this function 763 * tries to solve in a safe manner. 764 * 765 * Also note that the function uses a somewhat relaxed locking scheme, so it may 766 * be called only for an already offlined CPU. 767 */ 768 void clear_tasks_mm_cpumask(int cpu) 769 { 770 struct task_struct *p; 771 772 /* 773 * This function is called after the cpu is taken down and marked 774 * offline, so its not like new tasks will ever get this cpu set in 775 * their mm mask. -- Peter Zijlstra 776 * Thus, we may use rcu_read_lock() here, instead of grabbing 777 * full-fledged tasklist_lock. 778 */ 779 WARN_ON(cpu_online(cpu)); 780 rcu_read_lock(); 781 for_each_process(p) { 782 struct task_struct *t; 783 784 /* 785 * Main thread might exit, but other threads may still have 786 * a valid mm. Find one. 
787 */ 788 t = find_lock_task_mm(p); 789 if (!t) 790 continue; 791 cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); 792 task_unlock(t); 793 } 794 rcu_read_unlock(); 795 } 796 797 /* Take this CPU down. */ 798 static int take_cpu_down(void *_param) 799 { 800 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 801 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); 802 int err, cpu = smp_processor_id(); 803 int ret; 804 805 /* Ensure this CPU doesn't handle any more interrupts. */ 806 err = __cpu_disable(); 807 if (err < 0) 808 return err; 809 810 /* 811 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not 812 * do this step again. 813 */ 814 WARN_ON(st->state != CPUHP_TEARDOWN_CPU); 815 st->state--; 816 /* Invoke the former CPU_DYING callbacks */ 817 for (; st->state > target; st->state--) { 818 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); 819 /* 820 * DYING must not fail! 821 */ 822 WARN_ON_ONCE(ret); 823 } 824 825 /* Give up timekeeping duties */ 826 tick_handover_do_timer(); 827 /* Park the stopper thread */ 828 stop_machine_park(cpu); 829 return 0; 830 } 831 832 static int takedown_cpu(unsigned int cpu) 833 { 834 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 835 int err; 836 837 /* Park the smpboot threads */ 838 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); 839 840 /* 841 * Prevent irq alloc/free while the dying cpu reorganizes the 842 * interrupt affinities. 843 */ 844 irq_lock_sparse(); 845 846 /* 847 * So now all preempt/rcu users must observe !cpu_active(). 848 */ 849 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); 850 if (err) { 851 /* CPU refused to die */ 852 irq_unlock_sparse(); 853 /* Unpark the hotplug thread so we can rollback there */ 854 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread); 855 return err; 856 } 857 BUG_ON(cpu_online(cpu)); 858 859 /* 860 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed 861 * all runnable tasks from the CPU, there's only the idle task left now 862 * that the migration thread is done doing the stop_machine thing. 863 * 864 * Wait for the stop thread to go away. 865 */ 866 wait_for_ap_thread(st, false); 867 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); 868 869 /* Interrupts are moved away from the dying cpu, reenable alloc/free */ 870 irq_unlock_sparse(); 871 872 hotplug_cpu__broadcast_tick_pull(cpu); 873 /* This actually kills the CPU. */ 874 __cpu_die(cpu); 875 876 tick_cleanup_dead_cpu(cpu); 877 rcutree_migrate_callbacks(cpu); 878 return 0; 879 } 880 881 static void cpuhp_complete_idle_dead(void *arg) 882 { 883 struct cpuhp_cpu_state *st = arg; 884 885 complete_ap_thread(st, false); 886 } 887 888 void cpuhp_report_idle_dead(void) 889 { 890 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 891 892 BUG_ON(st->state != CPUHP_AP_OFFLINE); 893 rcu_report_dead(smp_processor_id()); 894 st->state = CPUHP_AP_IDLE_DEAD; 895 /* 896 * We cannot call complete after rcu_report_dead() so we delegate it 897 * to an online cpu. 
898 */ 899 smp_call_function_single(cpumask_first(cpu_online_mask), 900 cpuhp_complete_idle_dead, st, 0); 901 } 902 903 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) 904 { 905 for (st->state++; st->state < st->target; st->state++) 906 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); 907 } 908 909 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 910 enum cpuhp_state target) 911 { 912 enum cpuhp_state prev_state = st->state; 913 int ret = 0; 914 915 for (; st->state > target; st->state--) { 916 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); 917 if (ret) { 918 st->target = prev_state; 919 undo_cpu_down(cpu, st); 920 break; 921 } 922 } 923 return ret; 924 } 925 926 /* Requires cpu_add_remove_lock to be held */ 927 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, 928 enum cpuhp_state target) 929 { 930 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 931 int prev_state, ret = 0; 932 933 if (num_online_cpus() == 1) 934 return -EBUSY; 935 936 if (!cpu_present(cpu)) 937 return -EINVAL; 938 939 cpus_write_lock(); 940 941 cpuhp_tasks_frozen = tasks_frozen; 942 943 prev_state = cpuhp_set_state(st, target); 944 /* 945 * If the current CPU state is in the range of the AP hotplug thread, 946 * then we need to kick the thread. 947 */ 948 if (st->state > CPUHP_TEARDOWN_CPU) { 949 st->target = max((int)target, CPUHP_TEARDOWN_CPU); 950 ret = cpuhp_kick_ap_work(cpu); 951 /* 952 * The AP side has done the error rollback already. Just 953 * return the error code.. 954 */ 955 if (ret) 956 goto out; 957 958 /* 959 * We might have stopped still in the range of the AP hotplug 960 * thread. Nothing to do anymore. 961 */ 962 if (st->state > CPUHP_TEARDOWN_CPU) 963 goto out; 964 965 st->target = target; 966 } 967 /* 968 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need 969 * to do the further cleanups. 970 */ 971 ret = cpuhp_down_callbacks(cpu, st, target); 972 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { 973 cpuhp_reset_state(st, prev_state); 974 __cpuhp_kick_ap(st); 975 } 976 977 out: 978 cpus_write_unlock(); 979 /* 980 * Do post unplug cleanup. This is still protected against 981 * concurrent CPU hotplug via cpu_add_remove_lock. 982 */ 983 lockup_detector_cleanup(); 984 return ret; 985 } 986 987 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) 988 { 989 if (cpu_hotplug_disabled) 990 return -EBUSY; 991 return _cpu_down(cpu, 0, target); 992 } 993 994 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target) 995 { 996 int err; 997 998 cpu_maps_update_begin(); 999 err = cpu_down_maps_locked(cpu, target); 1000 cpu_maps_update_done(); 1001 return err; 1002 } 1003 1004 int cpu_down(unsigned int cpu) 1005 { 1006 return do_cpu_down(cpu, CPUHP_OFFLINE); 1007 } 1008 EXPORT_SYMBOL(cpu_down); 1009 1010 #else 1011 #define takedown_cpu NULL 1012 #endif /*CONFIG_HOTPLUG_CPU*/ 1013 1014 /** 1015 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU 1016 * @cpu: cpu that just started 1017 * 1018 * It must be called by the arch code on the new cpu, before the new cpu 1019 * enables interrupts and before the "boot" cpu returns from __cpu_up(). 1020 */ 1021 void notify_cpu_starting(unsigned int cpu) 1022 { 1023 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1024 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); 1025 int ret; 1026 1027 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. 
*/ 1028 st->booted_once = true; 1029 while (st->state < target) { 1030 st->state++; 1031 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); 1032 /* 1033 * STARTING must not fail! 1034 */ 1035 WARN_ON_ONCE(ret); 1036 } 1037 } 1038 1039 /* 1040 * Called from the idle task. Wake up the controlling task which brings the 1041 * stopper and the hotplug thread of the upcoming CPU up and then delegates 1042 * the rest of the online bringup to the hotplug thread. 1043 */ 1044 void cpuhp_online_idle(enum cpuhp_state state) 1045 { 1046 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 1047 1048 /* Happens for the boot cpu */ 1049 if (state != CPUHP_AP_ONLINE_IDLE) 1050 return; 1051 1052 st->state = CPUHP_AP_ONLINE_IDLE; 1053 complete_ap_thread(st, true); 1054 } 1055 1056 /* Requires cpu_add_remove_lock to be held */ 1057 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) 1058 { 1059 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1060 struct task_struct *idle; 1061 int ret = 0; 1062 1063 cpus_write_lock(); 1064 1065 if (!cpu_present(cpu)) { 1066 ret = -EINVAL; 1067 goto out; 1068 } 1069 1070 /* 1071 * The caller of do_cpu_up might have raced with another 1072 * caller. Ignore it for now. 1073 */ 1074 if (st->state >= target) 1075 goto out; 1076 1077 if (st->state == CPUHP_OFFLINE) { 1078 /* Let it fail before we try to bring the cpu up */ 1079 idle = idle_thread_get(cpu); 1080 if (IS_ERR(idle)) { 1081 ret = PTR_ERR(idle); 1082 goto out; 1083 } 1084 } 1085 1086 cpuhp_tasks_frozen = tasks_frozen; 1087 1088 cpuhp_set_state(st, target); 1089 /* 1090 * If the current CPU state is in the range of the AP hotplug thread, 1091 * then we need to kick the thread once more. 1092 */ 1093 if (st->state > CPUHP_BRINGUP_CPU) { 1094 ret = cpuhp_kick_ap_work(cpu); 1095 /* 1096 * The AP side has done the error rollback already. Just 1097 * return the error code.. 1098 */ 1099 if (ret) 1100 goto out; 1101 } 1102 1103 /* 1104 * Try to reach the target state. We max out on the BP at 1105 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is 1106 * responsible for bringing it up to the target state. 
1107 */ 1108 target = min((int)target, CPUHP_BRINGUP_CPU); 1109 ret = cpuhp_up_callbacks(cpu, st, target); 1110 out: 1111 cpus_write_unlock(); 1112 return ret; 1113 } 1114 1115 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target) 1116 { 1117 int err = 0; 1118 1119 if (!cpu_possible(cpu)) { 1120 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", 1121 cpu); 1122 #if defined(CONFIG_IA64) 1123 pr_err("please check additional_cpus= boot parameter\n"); 1124 #endif 1125 return -EINVAL; 1126 } 1127 1128 err = try_online_node(cpu_to_node(cpu)); 1129 if (err) 1130 return err; 1131 1132 cpu_maps_update_begin(); 1133 1134 if (cpu_hotplug_disabled) { 1135 err = -EBUSY; 1136 goto out; 1137 } 1138 if (!cpu_smt_allowed(cpu)) { 1139 err = -EPERM; 1140 goto out; 1141 } 1142 1143 err = _cpu_up(cpu, 0, target); 1144 out: 1145 cpu_maps_update_done(); 1146 return err; 1147 } 1148 1149 int cpu_up(unsigned int cpu) 1150 { 1151 return do_cpu_up(cpu, CPUHP_ONLINE); 1152 } 1153 EXPORT_SYMBOL_GPL(cpu_up); 1154 1155 #ifdef CONFIG_PM_SLEEP_SMP 1156 static cpumask_var_t frozen_cpus; 1157 1158 int freeze_secondary_cpus(int primary) 1159 { 1160 int cpu, error = 0; 1161 1162 cpu_maps_update_begin(); 1163 if (!cpu_online(primary)) 1164 primary = cpumask_first(cpu_online_mask); 1165 /* 1166 * We take down all of the non-boot CPUs in one shot to avoid races 1167 * with the userspace trying to use the CPU hotplug at the same time 1168 */ 1169 cpumask_clear(frozen_cpus); 1170 1171 pr_info("Disabling non-boot CPUs ...\n"); 1172 for_each_online_cpu(cpu) { 1173 if (cpu == primary) 1174 continue; 1175 trace_suspend_resume(TPS("CPU_OFF"), cpu, true); 1176 error = _cpu_down(cpu, 1, CPUHP_OFFLINE); 1177 trace_suspend_resume(TPS("CPU_OFF"), cpu, false); 1178 if (!error) 1179 cpumask_set_cpu(cpu, frozen_cpus); 1180 else { 1181 pr_err("Error taking CPU%d down: %d\n", cpu, error); 1182 break; 1183 } 1184 } 1185 1186 if (!error) 1187 BUG_ON(num_online_cpus() > 1); 1188 else 1189 pr_err("Non-boot CPUs are not disabled\n"); 1190 1191 /* 1192 * Make sure the CPUs won't be enabled by someone else. We need to do 1193 * this even in case of failure as all disable_nonboot_cpus() users are 1194 * supposed to do enable_nonboot_cpus() on the failure path. 
1195 */ 1196 cpu_hotplug_disabled++; 1197 1198 cpu_maps_update_done(); 1199 return error; 1200 } 1201 1202 void __weak arch_enable_nonboot_cpus_begin(void) 1203 { 1204 } 1205 1206 void __weak arch_enable_nonboot_cpus_end(void) 1207 { 1208 } 1209 1210 void enable_nonboot_cpus(void) 1211 { 1212 int cpu, error; 1213 1214 /* Allow everyone to use the CPU hotplug again */ 1215 cpu_maps_update_begin(); 1216 __cpu_hotplug_enable(); 1217 if (cpumask_empty(frozen_cpus)) 1218 goto out; 1219 1220 pr_info("Enabling non-boot CPUs ...\n"); 1221 1222 arch_enable_nonboot_cpus_begin(); 1223 1224 for_each_cpu(cpu, frozen_cpus) { 1225 trace_suspend_resume(TPS("CPU_ON"), cpu, true); 1226 error = _cpu_up(cpu, 1, CPUHP_ONLINE); 1227 trace_suspend_resume(TPS("CPU_ON"), cpu, false); 1228 if (!error) { 1229 pr_info("CPU%d is up\n", cpu); 1230 continue; 1231 } 1232 pr_warn("Error taking CPU%d up: %d\n", cpu, error); 1233 } 1234 1235 arch_enable_nonboot_cpus_end(); 1236 1237 cpumask_clear(frozen_cpus); 1238 out: 1239 cpu_maps_update_done(); 1240 } 1241 1242 static int __init alloc_frozen_cpus(void) 1243 { 1244 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) 1245 return -ENOMEM; 1246 return 0; 1247 } 1248 core_initcall(alloc_frozen_cpus); 1249 1250 /* 1251 * When callbacks for CPU hotplug notifications are being executed, we must 1252 * ensure that the state of the system with respect to the tasks being frozen 1253 * or not, as reported by the notification, remains unchanged *throughout the 1254 * duration* of the execution of the callbacks. 1255 * Hence we need to prevent the freezer from racing with regular CPU hotplug. 1256 * 1257 * This synchronization is implemented by mutually excluding regular CPU 1258 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/ 1259 * Hibernate notifications. 1260 */ 1261 static int 1262 cpu_hotplug_pm_callback(struct notifier_block *nb, 1263 unsigned long action, void *ptr) 1264 { 1265 switch (action) { 1266 1267 case PM_SUSPEND_PREPARE: 1268 case PM_HIBERNATION_PREPARE: 1269 cpu_hotplug_disable(); 1270 break; 1271 1272 case PM_POST_SUSPEND: 1273 case PM_POST_HIBERNATION: 1274 cpu_hotplug_enable(); 1275 break; 1276 1277 default: 1278 return NOTIFY_DONE; 1279 } 1280 1281 return NOTIFY_OK; 1282 } 1283 1284 1285 static int __init cpu_hotplug_pm_sync_init(void) 1286 { 1287 /* 1288 * cpu_hotplug_pm_callback has higher priority than x86 1289 * bsp_pm_callback which depends on cpu_hotplug_pm_callback 1290 * to disable cpu hotplug to avoid cpu hotplug race. 
1291 */ 1292 pm_notifier(cpu_hotplug_pm_callback, 0); 1293 return 0; 1294 } 1295 core_initcall(cpu_hotplug_pm_sync_init); 1296 1297 #endif /* CONFIG_PM_SLEEP_SMP */ 1298 1299 int __boot_cpu_id; 1300 1301 #endif /* CONFIG_SMP */ 1302 1303 /* Boot processor state steps */ 1304 static struct cpuhp_step cpuhp_hp_states[] = { 1305 [CPUHP_OFFLINE] = { 1306 .name = "offline", 1307 .startup.single = NULL, 1308 .teardown.single = NULL, 1309 }, 1310 #ifdef CONFIG_SMP 1311 [CPUHP_CREATE_THREADS]= { 1312 .name = "threads:prepare", 1313 .startup.single = smpboot_create_threads, 1314 .teardown.single = NULL, 1315 .cant_stop = true, 1316 }, 1317 [CPUHP_PERF_PREPARE] = { 1318 .name = "perf:prepare", 1319 .startup.single = perf_event_init_cpu, 1320 .teardown.single = perf_event_exit_cpu, 1321 }, 1322 [CPUHP_WORKQUEUE_PREP] = { 1323 .name = "workqueue:prepare", 1324 .startup.single = workqueue_prepare_cpu, 1325 .teardown.single = NULL, 1326 }, 1327 [CPUHP_HRTIMERS_PREPARE] = { 1328 .name = "hrtimers:prepare", 1329 .startup.single = hrtimers_prepare_cpu, 1330 .teardown.single = hrtimers_dead_cpu, 1331 }, 1332 [CPUHP_SMPCFD_PREPARE] = { 1333 .name = "smpcfd:prepare", 1334 .startup.single = smpcfd_prepare_cpu, 1335 .teardown.single = smpcfd_dead_cpu, 1336 }, 1337 [CPUHP_RELAY_PREPARE] = { 1338 .name = "relay:prepare", 1339 .startup.single = relay_prepare_cpu, 1340 .teardown.single = NULL, 1341 }, 1342 [CPUHP_SLAB_PREPARE] = { 1343 .name = "slab:prepare", 1344 .startup.single = slab_prepare_cpu, 1345 .teardown.single = slab_dead_cpu, 1346 }, 1347 [CPUHP_RCUTREE_PREP] = { 1348 .name = "RCU/tree:prepare", 1349 .startup.single = rcutree_prepare_cpu, 1350 .teardown.single = rcutree_dead_cpu, 1351 }, 1352 /* 1353 * On the tear-down path, timers_dead_cpu() must be invoked 1354 * before blk_mq_queue_reinit_notify() from notify_dead(), 1355 * otherwise a RCU stall occurs. 1356 */ 1357 [CPUHP_TIMERS_PREPARE] = { 1358 .name = "timers:prepare", 1359 .startup.single = timers_prepare_cpu, 1360 .teardown.single = timers_dead_cpu, 1361 }, 1362 /* Kicks the plugged cpu into life */ 1363 [CPUHP_BRINGUP_CPU] = { 1364 .name = "cpu:bringup", 1365 .startup.single = bringup_cpu, 1366 .teardown.single = NULL, 1367 .cant_stop = true, 1368 }, 1369 /* Final state before CPU kills itself */ 1370 [CPUHP_AP_IDLE_DEAD] = { 1371 .name = "idle:dead", 1372 }, 1373 /* 1374 * Last state before CPU enters the idle loop to die. Transient state 1375 * for synchronization. 1376 */ 1377 [CPUHP_AP_OFFLINE] = { 1378 .name = "ap:offline", 1379 .cant_stop = true, 1380 }, 1381 /* First state is scheduler control. Interrupts are disabled */ 1382 [CPUHP_AP_SCHED_STARTING] = { 1383 .name = "sched:starting", 1384 .startup.single = sched_cpu_starting, 1385 .teardown.single = sched_cpu_dying, 1386 }, 1387 [CPUHP_AP_RCUTREE_DYING] = { 1388 .name = "RCU/tree:dying", 1389 .startup.single = NULL, 1390 .teardown.single = rcutree_dying_cpu, 1391 }, 1392 [CPUHP_AP_SMPCFD_DYING] = { 1393 .name = "smpcfd:dying", 1394 .startup.single = NULL, 1395 .teardown.single = smpcfd_dying_cpu, 1396 }, 1397 /* Entry state on starting. Interrupts enabled from here on. Transient 1398 * state for synchronsization */ 1399 [CPUHP_AP_ONLINE] = { 1400 .name = "ap:online", 1401 }, 1402 /* 1403 * Handled on controll processor until the plugged processor manages 1404 * this itself. 
1405 */ 1406 [CPUHP_TEARDOWN_CPU] = { 1407 .name = "cpu:teardown", 1408 .startup.single = NULL, 1409 .teardown.single = takedown_cpu, 1410 .cant_stop = true, 1411 }, 1412 /* Handle smpboot threads park/unpark */ 1413 [CPUHP_AP_SMPBOOT_THREADS] = { 1414 .name = "smpboot/threads:online", 1415 .startup.single = smpboot_unpark_threads, 1416 .teardown.single = smpboot_park_threads, 1417 }, 1418 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = { 1419 .name = "irq/affinity:online", 1420 .startup.single = irq_affinity_online_cpu, 1421 .teardown.single = NULL, 1422 }, 1423 [CPUHP_AP_PERF_ONLINE] = { 1424 .name = "perf:online", 1425 .startup.single = perf_event_init_cpu, 1426 .teardown.single = perf_event_exit_cpu, 1427 }, 1428 [CPUHP_AP_WATCHDOG_ONLINE] = { 1429 .name = "lockup_detector:online", 1430 .startup.single = lockup_detector_online_cpu, 1431 .teardown.single = lockup_detector_offline_cpu, 1432 }, 1433 [CPUHP_AP_WORKQUEUE_ONLINE] = { 1434 .name = "workqueue:online", 1435 .startup.single = workqueue_online_cpu, 1436 .teardown.single = workqueue_offline_cpu, 1437 }, 1438 [CPUHP_AP_RCUTREE_ONLINE] = { 1439 .name = "RCU/tree:online", 1440 .startup.single = rcutree_online_cpu, 1441 .teardown.single = rcutree_offline_cpu, 1442 }, 1443 #endif 1444 /* 1445 * The dynamically registered state space is here 1446 */ 1447 1448 #ifdef CONFIG_SMP 1449 /* Last state is scheduler control setting the cpu active */ 1450 [CPUHP_AP_ACTIVE] = { 1451 .name = "sched:active", 1452 .startup.single = sched_cpu_activate, 1453 .teardown.single = sched_cpu_deactivate, 1454 }, 1455 #endif 1456 1457 /* CPU is fully up and running. */ 1458 [CPUHP_ONLINE] = { 1459 .name = "online", 1460 .startup.single = NULL, 1461 .teardown.single = NULL, 1462 }, 1463 }; 1464 1465 /* Sanity check for callbacks */ 1466 static int cpuhp_cb_check(enum cpuhp_state state) 1467 { 1468 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE) 1469 return -EINVAL; 1470 return 0; 1471 } 1472 1473 /* 1474 * Returns a free for dynamic slot assignment of the Online state. The states 1475 * are protected by the cpuhp_slot_states mutex and an empty slot is identified 1476 * by having no name assigned. 1477 */ 1478 static int cpuhp_reserve_state(enum cpuhp_state state) 1479 { 1480 enum cpuhp_state i, end; 1481 struct cpuhp_step *step; 1482 1483 switch (state) { 1484 case CPUHP_AP_ONLINE_DYN: 1485 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN; 1486 end = CPUHP_AP_ONLINE_DYN_END; 1487 break; 1488 case CPUHP_BP_PREPARE_DYN: 1489 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN; 1490 end = CPUHP_BP_PREPARE_DYN_END; 1491 break; 1492 default: 1493 return -EINVAL; 1494 } 1495 1496 for (i = state; i <= end; i++, step++) { 1497 if (!step->name) 1498 return i; 1499 } 1500 WARN(1, "No more dynamic states available for CPU hotplug\n"); 1501 return -ENOSPC; 1502 } 1503 1504 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, 1505 int (*startup)(unsigned int cpu), 1506 int (*teardown)(unsigned int cpu), 1507 bool multi_instance) 1508 { 1509 /* (Un)Install the callbacks for further cpu hotplug operations */ 1510 struct cpuhp_step *sp; 1511 int ret = 0; 1512 1513 /* 1514 * If name is NULL, then the state gets removed. 1515 * 1516 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on 1517 * the first allocation from these dynamic ranges, so the removal 1518 * would trigger a new allocation and clear the wrong (already 1519 * empty) state, leaving the callbacks of the to be cleared state 1520 * dangling, which causes wreckage on the next hotplug operation. 
1521 */ 1522 if (name && (state == CPUHP_AP_ONLINE_DYN || 1523 state == CPUHP_BP_PREPARE_DYN)) { 1524 ret = cpuhp_reserve_state(state); 1525 if (ret < 0) 1526 return ret; 1527 state = ret; 1528 } 1529 sp = cpuhp_get_step(state); 1530 if (name && sp->name) 1531 return -EBUSY; 1532 1533 sp->startup.single = startup; 1534 sp->teardown.single = teardown; 1535 sp->name = name; 1536 sp->multi_instance = multi_instance; 1537 INIT_HLIST_HEAD(&sp->list); 1538 return ret; 1539 } 1540 1541 static void *cpuhp_get_teardown_cb(enum cpuhp_state state) 1542 { 1543 return cpuhp_get_step(state)->teardown.single; 1544 } 1545 1546 /* 1547 * Call the startup/teardown function for a step either on the AP or 1548 * on the current CPU. 1549 */ 1550 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, 1551 struct hlist_node *node) 1552 { 1553 struct cpuhp_step *sp = cpuhp_get_step(state); 1554 int ret; 1555 1556 /* 1557 * If there's nothing to do, we done. 1558 * Relies on the union for multi_instance. 1559 */ 1560 if ((bringup && !sp->startup.single) || 1561 (!bringup && !sp->teardown.single)) 1562 return 0; 1563 /* 1564 * The non AP bound callbacks can fail on bringup. On teardown 1565 * e.g. module removal we crash for now. 1566 */ 1567 #ifdef CONFIG_SMP 1568 if (cpuhp_is_ap_state(state)) 1569 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); 1570 else 1571 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); 1572 #else 1573 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); 1574 #endif 1575 BUG_ON(ret && !bringup); 1576 return ret; 1577 } 1578 1579 /* 1580 * Called from __cpuhp_setup_state on a recoverable failure. 1581 * 1582 * Note: The teardown callbacks for rollback are not allowed to fail! 1583 */ 1584 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, 1585 struct hlist_node *node) 1586 { 1587 int cpu; 1588 1589 /* Roll back the already executed steps on the other cpus */ 1590 for_each_present_cpu(cpu) { 1591 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1592 int cpustate = st->state; 1593 1594 if (cpu >= failedcpu) 1595 break; 1596 1597 /* Did we invoke the startup call on that cpu ? */ 1598 if (cpustate >= state) 1599 cpuhp_issue_call(cpu, state, false, node); 1600 } 1601 } 1602 1603 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, 1604 struct hlist_node *node, 1605 bool invoke) 1606 { 1607 struct cpuhp_step *sp; 1608 int cpu; 1609 int ret; 1610 1611 lockdep_assert_cpus_held(); 1612 1613 sp = cpuhp_get_step(state); 1614 if (sp->multi_instance == false) 1615 return -EINVAL; 1616 1617 mutex_lock(&cpuhp_state_mutex); 1618 1619 if (!invoke || !sp->startup.multi) 1620 goto add_node; 1621 1622 /* 1623 * Try to call the startup callback for each present cpu 1624 * depending on the hotplug state of the cpu. 
1625 */ 1626 for_each_present_cpu(cpu) { 1627 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1628 int cpustate = st->state; 1629 1630 if (cpustate < state) 1631 continue; 1632 1633 ret = cpuhp_issue_call(cpu, state, true, node); 1634 if (ret) { 1635 if (sp->teardown.multi) 1636 cpuhp_rollback_install(cpu, state, node); 1637 goto unlock; 1638 } 1639 } 1640 add_node: 1641 ret = 0; 1642 hlist_add_head(node, &sp->list); 1643 unlock: 1644 mutex_unlock(&cpuhp_state_mutex); 1645 return ret; 1646 } 1647 1648 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, 1649 bool invoke) 1650 { 1651 int ret; 1652 1653 cpus_read_lock(); 1654 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke); 1655 cpus_read_unlock(); 1656 return ret; 1657 } 1658 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance); 1659 1660 /** 1661 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state 1662 * @state: The state to setup 1663 * @invoke: If true, the startup function is invoked for cpus where 1664 * cpu state >= @state 1665 * @startup: startup callback function 1666 * @teardown: teardown callback function 1667 * @multi_instance: State is set up for multiple instances which get 1668 * added afterwards. 1669 * 1670 * The caller needs to hold cpus read locked while calling this function. 1671 * Returns: 1672 * On success: 1673 * Positive state number if @state is CPUHP_AP_ONLINE_DYN 1674 * 0 for all other states 1675 * On failure: proper (negative) error code 1676 */ 1677 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, 1678 const char *name, bool invoke, 1679 int (*startup)(unsigned int cpu), 1680 int (*teardown)(unsigned int cpu), 1681 bool multi_instance) 1682 { 1683 int cpu, ret = 0; 1684 bool dynstate; 1685 1686 lockdep_assert_cpus_held(); 1687 1688 if (cpuhp_cb_check(state) || !name) 1689 return -EINVAL; 1690 1691 mutex_lock(&cpuhp_state_mutex); 1692 1693 ret = cpuhp_store_callbacks(state, name, startup, teardown, 1694 multi_instance); 1695 1696 dynstate = state == CPUHP_AP_ONLINE_DYN; 1697 if (ret > 0 && dynstate) { 1698 state = ret; 1699 ret = 0; 1700 } 1701 1702 if (ret || !invoke || !startup) 1703 goto out; 1704 1705 /* 1706 * Try to call the startup callback for each present cpu 1707 * depending on the hotplug state of the cpu. 1708 */ 1709 for_each_present_cpu(cpu) { 1710 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1711 int cpustate = st->state; 1712 1713 if (cpustate < state) 1714 continue; 1715 1716 ret = cpuhp_issue_call(cpu, state, true, NULL); 1717 if (ret) { 1718 if (teardown) 1719 cpuhp_rollback_install(cpu, state, NULL); 1720 cpuhp_store_callbacks(state, NULL, NULL, NULL, false); 1721 goto out; 1722 } 1723 } 1724 out: 1725 mutex_unlock(&cpuhp_state_mutex); 1726 /* 1727 * If the requested state is CPUHP_AP_ONLINE_DYN, return the 1728 * dynamically allocated state in case of success. 
1729 */ 1730 if (!ret && dynstate) 1731 return state; 1732 return ret; 1733 } 1734 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked); 1735 1736 int __cpuhp_setup_state(enum cpuhp_state state, 1737 const char *name, bool invoke, 1738 int (*startup)(unsigned int cpu), 1739 int (*teardown)(unsigned int cpu), 1740 bool multi_instance) 1741 { 1742 int ret; 1743 1744 cpus_read_lock(); 1745 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup, 1746 teardown, multi_instance); 1747 cpus_read_unlock(); 1748 return ret; 1749 } 1750 EXPORT_SYMBOL(__cpuhp_setup_state); 1751 1752 int __cpuhp_state_remove_instance(enum cpuhp_state state, 1753 struct hlist_node *node, bool invoke) 1754 { 1755 struct cpuhp_step *sp = cpuhp_get_step(state); 1756 int cpu; 1757 1758 BUG_ON(cpuhp_cb_check(state)); 1759 1760 if (!sp->multi_instance) 1761 return -EINVAL; 1762 1763 cpus_read_lock(); 1764 mutex_lock(&cpuhp_state_mutex); 1765 1766 if (!invoke || !cpuhp_get_teardown_cb(state)) 1767 goto remove; 1768 /* 1769 * Call the teardown callback for each present cpu depending 1770 * on the hotplug state of the cpu. This function is not 1771 * allowed to fail currently! 1772 */ 1773 for_each_present_cpu(cpu) { 1774 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1775 int cpustate = st->state; 1776 1777 if (cpustate >= state) 1778 cpuhp_issue_call(cpu, state, false, node); 1779 } 1780 1781 remove: 1782 hlist_del(node); 1783 mutex_unlock(&cpuhp_state_mutex); 1784 cpus_read_unlock(); 1785 1786 return 0; 1787 } 1788 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); 1789 1790 /** 1791 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state 1792 * @state: The state to remove 1793 * @invoke: If true, the teardown function is invoked for cpus where 1794 * cpu state >= @state 1795 * 1796 * The caller needs to hold cpus read locked while calling this function. 1797 * The teardown callback is currently not allowed to fail. Think 1798 * about module removal! 1799 */ 1800 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke) 1801 { 1802 struct cpuhp_step *sp = cpuhp_get_step(state); 1803 int cpu; 1804 1805 BUG_ON(cpuhp_cb_check(state)); 1806 1807 lockdep_assert_cpus_held(); 1808 1809 mutex_lock(&cpuhp_state_mutex); 1810 if (sp->multi_instance) { 1811 WARN(!hlist_empty(&sp->list), 1812 "Error: Removing state %d which has instances left.\n", 1813 state); 1814 goto remove; 1815 } 1816 1817 if (!invoke || !cpuhp_get_teardown_cb(state)) 1818 goto remove; 1819 1820 /* 1821 * Call the teardown callback for each present cpu depending 1822 * on the hotplug state of the cpu. This function is not 1823 * allowed to fail currently! 
1824 */ 1825 for_each_present_cpu(cpu) { 1826 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1827 int cpustate = st->state; 1828 1829 if (cpustate >= state) 1830 cpuhp_issue_call(cpu, state, false, NULL); 1831 } 1832 remove: 1833 cpuhp_store_callbacks(state, NULL, NULL, NULL, false); 1834 mutex_unlock(&cpuhp_state_mutex); 1835 } 1836 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked); 1837 1838 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) 1839 { 1840 cpus_read_lock(); 1841 __cpuhp_remove_state_cpuslocked(state, invoke); 1842 cpus_read_unlock(); 1843 } 1844 EXPORT_SYMBOL(__cpuhp_remove_state); 1845 1846 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU) 1847 static ssize_t show_cpuhp_state(struct device *dev, 1848 struct device_attribute *attr, char *buf) 1849 { 1850 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 1851 1852 return sprintf(buf, "%d\n", st->state); 1853 } 1854 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL); 1855 1856 static ssize_t write_cpuhp_target(struct device *dev, 1857 struct device_attribute *attr, 1858 const char *buf, size_t count) 1859 { 1860 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 1861 struct cpuhp_step *sp; 1862 int target, ret; 1863 1864 ret = kstrtoint(buf, 10, &target); 1865 if (ret) 1866 return ret; 1867 1868 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL 1869 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE) 1870 return -EINVAL; 1871 #else 1872 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE) 1873 return -EINVAL; 1874 #endif 1875 1876 ret = lock_device_hotplug_sysfs(); 1877 if (ret) 1878 return ret; 1879 1880 mutex_lock(&cpuhp_state_mutex); 1881 sp = cpuhp_get_step(target); 1882 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; 1883 mutex_unlock(&cpuhp_state_mutex); 1884 if (ret) 1885 goto out; 1886 1887 if (st->state < target) 1888 ret = do_cpu_up(dev->id, target); 1889 else 1890 ret = do_cpu_down(dev->id, target); 1891 out: 1892 unlock_device_hotplug(); 1893 return ret ? ret : count; 1894 } 1895 1896 static ssize_t show_cpuhp_target(struct device *dev, 1897 struct device_attribute *attr, char *buf) 1898 { 1899 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 1900 1901 return sprintf(buf, "%d\n", st->target); 1902 } 1903 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target); 1904 1905 1906 static ssize_t write_cpuhp_fail(struct device *dev, 1907 struct device_attribute *attr, 1908 const char *buf, size_t count) 1909 { 1910 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 1911 struct cpuhp_step *sp; 1912 int fail, ret; 1913 1914 ret = kstrtoint(buf, 10, &fail); 1915 if (ret) 1916 return ret; 1917 1918 /* 1919 * Cannot fail STARTING/DYING callbacks. 1920 */ 1921 if (cpuhp_is_atomic_state(fail)) 1922 return -EINVAL; 1923 1924 /* 1925 * Cannot fail anything that doesn't have callbacks. 
1926 */ 1927 mutex_lock(&cpuhp_state_mutex); 1928 sp = cpuhp_get_step(fail); 1929 if (!sp->startup.single && !sp->teardown.single) 1930 ret = -EINVAL; 1931 mutex_unlock(&cpuhp_state_mutex); 1932 if (ret) 1933 return ret; 1934 1935 st->fail = fail; 1936 1937 return count; 1938 } 1939 1940 static ssize_t show_cpuhp_fail(struct device *dev, 1941 struct device_attribute *attr, char *buf) 1942 { 1943 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); 1944 1945 return sprintf(buf, "%d\n", st->fail); 1946 } 1947 1948 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail); 1949 1950 static struct attribute *cpuhp_cpu_attrs[] = { 1951 &dev_attr_state.attr, 1952 &dev_attr_target.attr, 1953 &dev_attr_fail.attr, 1954 NULL 1955 }; 1956 1957 static const struct attribute_group cpuhp_cpu_attr_group = { 1958 .attrs = cpuhp_cpu_attrs, 1959 .name = "hotplug", 1960 NULL 1961 }; 1962 1963 static ssize_t show_cpuhp_states(struct device *dev, 1964 struct device_attribute *attr, char *buf) 1965 { 1966 ssize_t cur, res = 0; 1967 int i; 1968 1969 mutex_lock(&cpuhp_state_mutex); 1970 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) { 1971 struct cpuhp_step *sp = cpuhp_get_step(i); 1972 1973 if (sp->name) { 1974 cur = sprintf(buf, "%3d: %s\n", i, sp->name); 1975 buf += cur; 1976 res += cur; 1977 } 1978 } 1979 mutex_unlock(&cpuhp_state_mutex); 1980 return res; 1981 } 1982 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL); 1983 1984 static struct attribute *cpuhp_cpu_root_attrs[] = { 1985 &dev_attr_states.attr, 1986 NULL 1987 }; 1988 1989 static const struct attribute_group cpuhp_cpu_root_attr_group = { 1990 .attrs = cpuhp_cpu_root_attrs, 1991 .name = "hotplug", 1992 NULL 1993 }; 1994 1995 #ifdef CONFIG_HOTPLUG_SMT 1996 1997 static const char *smt_states[] = { 1998 [CPU_SMT_ENABLED] = "on", 1999 [CPU_SMT_DISABLED] = "off", 2000 [CPU_SMT_FORCE_DISABLED] = "forceoff", 2001 [CPU_SMT_NOT_SUPPORTED] = "notsupported", 2002 }; 2003 2004 static ssize_t 2005 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf) 2006 { 2007 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]); 2008 } 2009 2010 static void cpuhp_offline_cpu_device(unsigned int cpu) 2011 { 2012 struct device *dev = get_cpu_device(cpu); 2013 2014 dev->offline = true; 2015 /* Tell user space about the state change */ 2016 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 2017 } 2018 2019 static void cpuhp_online_cpu_device(unsigned int cpu) 2020 { 2021 struct device *dev = get_cpu_device(cpu); 2022 2023 dev->offline = false; 2024 /* Tell user space about the state change */ 2025 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2026 } 2027 2028 static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) 2029 { 2030 int cpu, ret = 0; 2031 2032 cpu_maps_update_begin(); 2033 for_each_online_cpu(cpu) { 2034 if (topology_is_primary_thread(cpu)) 2035 continue; 2036 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); 2037 if (ret) 2038 break; 2039 /* 2040 * As this needs to hold the cpu maps lock it's impossible 2041 * to call device_offline() because that ends up calling 2042 * cpu_down() which takes cpu maps lock. cpu maps lock 2043 * needs to be held as this might race against in kernel 2044 * abusers of the hotplug machinery (thermal management). 2045 * 2046 * So nothing would update device:offline state. That would 2047 * leave the sysfs entry stale and prevent onlining after 2048 * smt control has been changed to 'off' again. 
This is 2049 * called under the sysfs hotplug lock, so it is properly 2050 * serialized against the regular offline usage. 2051 */ 2052 cpuhp_offline_cpu_device(cpu); 2053 } 2054 if (!ret) 2055 cpu_smt_control = ctrlval; 2056 cpu_maps_update_done(); 2057 return ret; 2058 } 2059 2060 static int cpuhp_smt_enable(void) 2061 { 2062 int cpu, ret = 0; 2063 2064 cpu_maps_update_begin(); 2065 cpu_smt_control = CPU_SMT_ENABLED; 2066 for_each_present_cpu(cpu) { 2067 /* Skip online CPUs and CPUs on offline nodes */ 2068 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) 2069 continue; 2070 ret = _cpu_up(cpu, 0, CPUHP_ONLINE); 2071 if (ret) 2072 break; 2073 /* See comment in cpuhp_smt_disable() */ 2074 cpuhp_online_cpu_device(cpu); 2075 } 2076 cpu_maps_update_done(); 2077 return ret; 2078 } 2079 2080 static ssize_t 2081 store_smt_control(struct device *dev, struct device_attribute *attr, 2082 const char *buf, size_t count) 2083 { 2084 int ctrlval, ret; 2085 2086 if (sysfs_streq(buf, "on")) 2087 ctrlval = CPU_SMT_ENABLED; 2088 else if (sysfs_streq(buf, "off")) 2089 ctrlval = CPU_SMT_DISABLED; 2090 else if (sysfs_streq(buf, "forceoff")) 2091 ctrlval = CPU_SMT_FORCE_DISABLED; 2092 else 2093 return -EINVAL; 2094 2095 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED) 2096 return -EPERM; 2097 2098 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) 2099 return -ENODEV; 2100 2101 ret = lock_device_hotplug_sysfs(); 2102 if (ret) 2103 return ret; 2104 2105 if (ctrlval != cpu_smt_control) { 2106 switch (ctrlval) { 2107 case CPU_SMT_ENABLED: 2108 ret = cpuhp_smt_enable(); 2109 break; 2110 case CPU_SMT_DISABLED: 2111 case CPU_SMT_FORCE_DISABLED: 2112 ret = cpuhp_smt_disable(ctrlval); 2113 break; 2114 } 2115 } 2116 2117 unlock_device_hotplug(); 2118 return ret ? ret : count; 2119 } 2120 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control); 2121 2122 static ssize_t 2123 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf) 2124 { 2125 bool active = topology_max_smt_threads() > 1; 2126 2127 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active); 2128 } 2129 static DEVICE_ATTR(active, 0444, show_smt_active, NULL); 2130 2131 static struct attribute *cpuhp_smt_attrs[] = { 2132 &dev_attr_control.attr, 2133 &dev_attr_active.attr, 2134 NULL 2135 }; 2136 2137 static const struct attribute_group cpuhp_smt_attr_group = { 2138 .attrs = cpuhp_smt_attrs, 2139 .name = "smt", 2140 NULL 2141 }; 2142 2143 static int __init cpu_smt_state_init(void) 2144 { 2145 return sysfs_create_group(&cpu_subsys.dev_root->kobj, 2146 &cpuhp_smt_attr_group); 2147 } 2148 2149 #else 2150 static inline int cpu_smt_state_init(void) { return 0; } 2151 #endif 2152 2153 static int __init cpuhp_sysfs_init(void) 2154 { 2155 int cpu, ret; 2156 2157 ret = cpu_smt_state_init(); 2158 if (ret) 2159 return ret; 2160 2161 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, 2162 &cpuhp_cpu_root_attr_group); 2163 if (ret) 2164 return ret; 2165 2166 for_each_possible_cpu(cpu) { 2167 struct device *dev = get_cpu_device(cpu); 2168 2169 if (!dev) 2170 continue; 2171 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group); 2172 if (ret) 2173 return ret; 2174 } 2175 return 0; 2176 } 2177 device_initcall(cpuhp_sysfs_init); 2178 #endif 2179 2180 /* 2181 * cpu_bit_bitmap[] is a special, "compressed" data structure that 2182 * represents all NR_CPUS bits binary values of 1<<nr. 2183 * 2184 * It is used by cpumask_of() to get a constant address to a CPU 2185 * mask value that has a single bit set only. 
2186 */ 2187 2188 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 2189 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) 2190 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 2191 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 2192 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 2193 2194 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 2195 2196 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 2197 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 2198 #if BITS_PER_LONG > 32 2199 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 2200 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 2201 #endif 2202 }; 2203 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 2204 2205 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 2206 EXPORT_SYMBOL(cpu_all_bits); 2207 2208 #ifdef CONFIG_INIT_ALL_POSSIBLE 2209 struct cpumask __cpu_possible_mask __read_mostly 2210 = {CPU_BITS_ALL}; 2211 #else 2212 struct cpumask __cpu_possible_mask __read_mostly; 2213 #endif 2214 EXPORT_SYMBOL(__cpu_possible_mask); 2215 2216 struct cpumask __cpu_online_mask __read_mostly; 2217 EXPORT_SYMBOL(__cpu_online_mask); 2218 2219 struct cpumask __cpu_present_mask __read_mostly; 2220 EXPORT_SYMBOL(__cpu_present_mask); 2221 2222 struct cpumask __cpu_active_mask __read_mostly; 2223 EXPORT_SYMBOL(__cpu_active_mask); 2224 2225 void init_cpu_present(const struct cpumask *src) 2226 { 2227 cpumask_copy(&__cpu_present_mask, src); 2228 } 2229 2230 void init_cpu_possible(const struct cpumask *src) 2231 { 2232 cpumask_copy(&__cpu_possible_mask, src); 2233 } 2234 2235 void init_cpu_online(const struct cpumask *src) 2236 { 2237 cpumask_copy(&__cpu_online_mask, src); 2238 } 2239 2240 /* 2241 * Activate the first processor. 2242 */ 2243 void __init boot_cpu_init(void) 2244 { 2245 int cpu = smp_processor_id(); 2246 2247 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 2248 set_cpu_online(cpu, true); 2249 set_cpu_active(cpu, true); 2250 set_cpu_present(cpu, true); 2251 set_cpu_possible(cpu, true); 2252 2253 #ifdef CONFIG_SMP 2254 __boot_cpu_id = cpu; 2255 #endif 2256 } 2257 2258 /* 2259 * Must be called _AFTER_ setting up the per_cpu areas 2260 */ 2261 void __init boot_cpu_hotplug_init(void) 2262 { 2263 #ifdef CONFIG_SMP 2264 this_cpu_write(cpuhp_state.booted_once, true); 2265 #endif 2266 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); 2267 } 2268