/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read()	lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()		lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()		lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
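
/*
 * Illustrative sketch (not part of the original file): a typical read-side
 * user of the two helpers above. Code that walks cpu_online_mask and must
 * not race with a CPU going away brackets the walk with
 * get_online_cpus()/put_online_cpus(). The function name below is
 * hypothetical.
 */
#if 0	/* example only */
static unsigned int example_count_online_cpus_safely(void)
{
	unsigned int cpu, count = 0;

	get_online_cpus();	/* blocks hotplug writers; may sleep */
	for_each_online_cpu(cpu)
		count++;
	put_online_cpus();	/* drop the reader reference */

	return count;
}
#endif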
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */
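
/*
 * Illustrative sketch (not part of the original file): how a hotplug
 * "writer" combines the primitives above. This mirrors the structure of
 * _cpu_down()/_cpu_up() further down in this file; the function name and
 * body are hypothetical.
 */
#if 0	/* example only */
static int example_hotplug_write_side(void)
{
	int err = 0;

	cpu_maps_update_begin();	/* serialize against other writers */
	cpu_hotplug_begin();		/* wait for all readers to drain */

	/* ... update cpu_online_mask / run notifiers here ... */

	cpu_hotplug_done();		/* let new readers in again */
	cpu_maps_update_done();
	return err;
}
#endif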
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
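
/*
 * Illustrative sketch (not part of the original file): registering a CPU
 * hotplug notifier with the APIs above. The callback and init function are
 * hypothetical; the begin/done pairing around __register_cpu_notifier()
 * follows the rule documented next to cpu_maps_update_begin() earlier in
 * this file.
 */
#if 0	/* example only */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/* ... set up per-cpu state for the CPU in (long)hcpu ... */
		break;
	case CPU_DOWN_PREPARE:
		/* ... quiesce users; notifier_from_errno() here aborts ... */
		break;
	case CPU_DEAD:
		/* ... tear down per-cpu state ... */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

static int __init example_cpu_notifier_init(void)
{
	cpu_notifier_register_begin();
	/* ... initialize state for CPUs that are already online ... */
	__register_cpu_notifier(&example_cpu_nb);
	cpu_notifier_register_done();
	return 0;
}
#endif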
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
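
/*
 * Illustrative sketch (not part of the original file): offlining a CPU and
 * bringing it back from kernel code via the exported cpu_down()/cpu_up()
 * entry points. The function name is hypothetical; note that both calls
 * return -EBUSY while hotplug is disabled (e.g. during suspend).
 */
#if 0	/* example only */
static int example_cycle_cpu(unsigned int cpu)
{
	int err;

	err = cpu_down(cpu);	/* runs the _cpu_down() path above */
	if (err)
		return err;

	return cpu_up(cpu);	/* re-onlines it via _cpu_up() below */
}
#endif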
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
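
/*
 * Illustrative sketch (not part of the original file): the suspend/resume
 * core pairs the two helpers above roughly like this. The function name and
 * placeholder steps are hypothetical; see kernel/power/ for the real
 * callers.
 */
#if 0	/* example only */
static int example_suspend_cpus(void)
{
	int error;

	error = disable_nonboot_cpus();	/* only the boot CPU stays online */
	if (error)
		return error;

	/* ... enter the low-power state on the boot CPU ... */

	enable_nonboot_cpus();		/* bring the frozen CPUs back up */
	return 0;
}
#endif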
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
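
/*
 * Illustrative sketch (not part of the original file): where the kernel-doc
 * above expects notify_cpu_starting() to sit in an architecture's secondary
 * CPU bring-up path. This is a generic, hypothetical outline; real arch
 * code differs in the details.
 */
#if 0	/* example only */
static void example_secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();

	/* ... arch-specific MMU/percpu/timer setup runs first ... */

	notify_cpu_starting(cpu);	/* CPU_STARTING, interrupts still off */
	set_cpu_online(cpu, true);	/* lets the boot CPU's __cpu_up() proceed */

	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);	/* enter the idle loop */
}
#endif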
668 */ 669 670 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 671 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) 672 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 673 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 674 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 675 676 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 677 678 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 679 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 680 #if BITS_PER_LONG > 32 681 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 682 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 683 #endif 684 }; 685 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 686 687 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 688 EXPORT_SYMBOL(cpu_all_bits); 689 690 #ifdef CONFIG_INIT_ALL_POSSIBLE 691 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 692 = CPU_BITS_ALL; 693 #else 694 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 695 #endif 696 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 697 EXPORT_SYMBOL(cpu_possible_mask); 698 699 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 700 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 701 EXPORT_SYMBOL(cpu_online_mask); 702 703 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 704 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 705 EXPORT_SYMBOL(cpu_present_mask); 706 707 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 708 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 709 EXPORT_SYMBOL(cpu_active_mask); 710 711 void set_cpu_possible(unsigned int cpu, bool possible) 712 { 713 if (possible) 714 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 715 else 716 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 717 } 718 719 void set_cpu_present(unsigned int cpu, bool present) 720 { 721 if (present) 722 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 723 else 724 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 725 } 726 727 void set_cpu_online(unsigned int cpu, bool online) 728 { 729 if (online) 730 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 731 else 732 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 733 } 734 735 void set_cpu_active(unsigned int cpu, bool active) 736 { 737 if (active) 738 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 739 else 740 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 741 } 742 743 void init_cpu_present(const struct cpumask *src) 744 { 745 cpumask_copy(to_cpumask(cpu_present_bits), src); 746 } 747 748 void init_cpu_possible(const struct cpumask *src) 749 { 750 cpumask_copy(to_cpumask(cpu_possible_bits), src); 751 } 752 753 void init_cpu_online(const struct cpumask *src) 754 { 755 cpumask_copy(to_cpumask(cpu_online_bits), src); 756 } 757