/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
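/*
 * Sketch of the registration protocol described above (the foo_* names are
 * hypothetical).  cpu_notifier_register_begin/done() resolve to
 * cpu_maps_update_begin/done() via <linux/cpu.h>, which is why the
 * EXPORT_SYMBOL() lines above use the notifier-facing names.  A subsystem
 * that must initialize per-CPU state and then start listening for hotplug
 * events, without racing against an ongoing hotplug operation, would do
 * roughly:
 *
 *	cpu_notifier_register_begin();
 *
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *
 *	cpu_notifier_register_done();
 */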
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read()	lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
					lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()		lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()		lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
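/*
 * Reader-side sketch of the refcounting scheme above (foo_* is hypothetical).
 * Anything that needs cpu_online_mask to stay stable over a block of work
 * wraps it in get_online_cpus()/put_online_cpus(); cpu_hotplug_begin() keeps
 * the writer asleep until the last such reader drops the refcount:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		foo_setup_on_cpu(cpu);
 *	put_online_cpus();
 */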
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
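/*
 * Sketch of a callback fed by cpu_chain (the foo_* names are hypothetical).
 * The action codes are the ones raised in this file (CPU_UP_PREPARE,
 * CPU_ONLINE, CPU_DOWN_PREPARE, CPU_DEAD, ...), possibly OR'ed with
 * CPU_TASKS_FROZEN on the suspend/resume path:
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(foo_prepare_cpu(cpu));
 *		case CPU_ONLINE:
 *			foo_start_on_cpu(cpu);
 *			break;
 *		case CPU_DOWN_PREPARE:
 *			foo_quiesce_cpu(cpu);
 *			break;
 *		case CPU_DEAD:
 *			foo_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call	= foo_cpu_callback,
 *	};
 */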
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We check with task_rq(p)->lock unlocked.  Order the read so
		 * that we do not warn about a task which ran on this cpu in
		 * the past but has just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);
	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per-cpu threads. */
	smpboot_unpark_threads(cpu);

	/* Now call the CPU_ONLINE notifiers. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
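/*
 * disable_nonboot_cpus()/enable_nonboot_cpus() are meant for the
 * suspend/hibernate core, which takes every CPU but the boot CPU offline
 * before entering the sleep state and brings the saved frozen_cpus set back
 * online on resume.  Passing tasks_frozen = 1 to _cpu_down()/_cpu_up() above
 * is what turns the resulting notifications into their *_FROZEN variants.
 */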
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback runs at a higher priority than the x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback having
	 * already disabled cpu hotplug, so that the two cannot race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for every value of nr, the NR_CPUS-bit binary value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
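/*
 * Sketch of how the table above is consumed; the actual helper lives in
 * <linux/cpumask.h> and is reproduced here only for illustration:
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 *
 * The row whose word 0 carries bit (cpu % BITS_PER_LONG) is selected, then
 * the pointer is stepped back by cpu / BITS_PER_LONG words so that the bit
 * lands in the right word of the returned mask.  The words backed into are
 * always zero - either the tail of the previous row or the all-zero row 0 -
 * which is why row 0 must exist and stay empty.
 */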
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
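/*
 * The setters above are how these masks get populated: architecture setup
 * code typically seeds cpu_possible_mask/cpu_present_mask during boot, and
 * the arch __cpu_up()/__cpu_disable() paths flip cpu_online_mask as CPUs
 * come and go.  A rough sketch of a boot-time user (nr_detected_cpus is a
 * hypothetical placeholder):
 *
 *	for (cpu = 0; cpu < nr_detected_cpus; cpu++)
 *		set_cpu_possible(cpu, true);
 *	init_cpu_present(cpu_possible_mask);
 */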