/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
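
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a typical reader of the online mask brackets its walk with
 * get_online_cpus()/put_online_cpus() so that no CPU can be hot-unplugged
 * while it runs.  The helper name some_driver_count_online() is
 * hypothetical.
 *
 *	static unsigned int some_driver_count_online(void)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		get_online_cpus();		// may sleep, see might_sleep() above
 *		for_each_online_cpu(cpu)	// mask cannot change under us
 *			n++;
 *		put_online_cpus();		// last reader wakes a waiting writer
 *		return n;
 *	}
 */
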
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */
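
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the write side of the locking above, as used by _cpu_down()/_cpu_up()
 * later in this file, nests the hotplug writer lock inside the map-update
 * mutex:
 *
 *	cpu_maps_update_begin();	// serialize against other writers
 *	cpu_hotplug_begin();		// wait for refcount == 0, block new readers
 *	... modify cpu_online_mask, run the notifier chain ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 *
 * Most callers go through cpu_up()/cpu_down() or the sysfs 'online'
 * attribute rather than using these primitives directly.
 */
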
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
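
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a minimal hotplug notifier as a subsystem might register it against the
 * chain above.  The names my_cpu_callback and my_cpu_notifier are
 * hypothetical.
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			// allocate per-cpu state; returning
 *			// notifier_from_errno(err) here makes cpu_up()
 *			// fail and roll back
 *			break;
 *		case CPU_ONLINE:
 *			// cpu is up, start using it
 *			break;
 *		case CPU_DOWN_PREPARE:
 *			// cpu is about to go away; may veto with an error
 *			break;
 *		case CPU_DEAD:
 *			// cpu is gone, free per-cpu state
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	// at init time:  register_cpu_notifier(&my_cpu_notifier);
 */
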
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */
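
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * kernel code that needs to bounce a CPU offline and back online can use the
 * exported helpers directly.  The function name my_recycle_cpu() is
 * hypothetical.
 *
 *	static int my_recycle_cpu(unsigned int cpu)
 *	{
 *		int ret;
 *
 *		ret = cpu_down(cpu);	// -EBUSY if hotplug is disabled or
 *					// this is the last online CPU
 *		if (ret)
 *			return ret;
 *		return cpu_up(cpu);	// -EINVAL if !cpu_possible(cpu)
 *	}
 *
 * From userspace the same operation is:
 *	echo 0 > /sys/devices/system/cpu/cpuN/online
 *	echo 1 > /sys/devices/system/cpu/cpuN/online
 */
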
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
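
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the suspend/hibernate core brackets its "one CPU only" phase with
 * disable_nonboot_cpus() and enable_nonboot_cpus() (the latter is defined
 * just below), roughly:
 *
 *	error = disable_nonboot_cpus();	// offline all but the boot CPU,
 *					// remembering them in frozen_cpus
 *	if (!error) {
 *		... run the syscore suspend path on the boot CPU ...
 *	}
 *	enable_nonboot_cpus();		// bring the remembered CPUs back up
 *
 * Both helpers pass tasks_frozen = 1 down to _cpu_down()/_cpu_up(), so
 * notifier callbacks see the *_FROZEN variants of the hotplug events.
 */
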
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
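
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the rough shape of a hypothetical architecture's secondary-CPU startup
 * path, showing where notify_cpu_starting() fits per the kerneldoc above.
 * The function name my_arch_secondary_start() is hypothetical.
 *
 *	void my_arch_secondary_start(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		... low-level per-cpu init, interrupts still disabled ...
 *		notify_cpu_starting(cpu);	// run CPU_STARTING notifiers
 *		set_cpu_online(cpu, true);	// lets __cpu_up() on the boot
 *						// CPU see the new CPU online
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_ONLINE);	// enter the idle loop
 *	}
 */
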
670 */ 671 672 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 673 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) 674 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 675 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 676 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 677 678 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 679 680 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 681 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 682 #if BITS_PER_LONG > 32 683 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 684 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 685 #endif 686 }; 687 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 688 689 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 690 EXPORT_SYMBOL(cpu_all_bits); 691 692 #ifdef CONFIG_INIT_ALL_POSSIBLE 693 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 694 = CPU_BITS_ALL; 695 #else 696 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 697 #endif 698 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 699 EXPORT_SYMBOL(cpu_possible_mask); 700 701 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 702 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 703 EXPORT_SYMBOL(cpu_online_mask); 704 705 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 706 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 707 EXPORT_SYMBOL(cpu_present_mask); 708 709 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 710 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 711 EXPORT_SYMBOL(cpu_active_mask); 712 713 void set_cpu_possible(unsigned int cpu, bool possible) 714 { 715 if (possible) 716 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 717 else 718 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 719 } 720 721 void set_cpu_present(unsigned int cpu, bool present) 722 { 723 if (present) 724 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 725 else 726 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 727 } 728 729 void set_cpu_online(unsigned int cpu, bool online) 730 { 731 if (online) { 732 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 733 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 734 } else { 735 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 736 } 737 } 738 739 void set_cpu_active(unsigned int cpu, bool active) 740 { 741 if (active) 742 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 743 else 744 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 745 } 746 747 void init_cpu_present(const struct cpumask *src) 748 { 749 cpumask_copy(to_cpumask(cpu_present_bits), src); 750 } 751 752 void init_cpu_possible(const struct cpumask *src) 753 { 754 cpumask_copy(to_cpumask(cpu_possible_bits), src); 755 } 756 757 void init_cpu_online(const struct cpumask *src) 758 { 759 cpumask_copy(to_cpumask(cpu_online_bits), src); 760 } 761