/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
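/*
 * Illustrative registration sequence (a sketch only, not code used in
 * this file): a subsystem that needs to initialize the already-online
 * CPUs and then register its callback without racing against hotplug
 * would typically do:
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		foobar_init_cpu(cpu);
 *	__register_cpu_notifier(&foobar_cpu_notifier);
 *	cpu_notifier_register_done();
 *
 * foobar_init_cpu() and foobar_cpu_notifier are hypothetical names.
 */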
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
	/* And allows lockless put_online_cpus(). */
	atomic_t puts_pending;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	if (!mutex_trylock(&cpu_hotplug.lock)) {
		atomic_inc(&cpu_hotplug.puts_pending);
		cpuhp_lock_release();
		return;
	}

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
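/*
 * Typical reader-side usage (an illustrative sketch, not code in this
 * file): anything that walks the online CPUs and must not race with a
 * CPU going away brackets the work with the refcount:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 *
 * do_something() is a placeholder for the per-cpu work.
 */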
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (atomic_read(&cpu_hotplug.puts_pending)) {
			int delta;

			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
			cpu_hotplug.refcount -= delta;
		}
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
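/*
 * A callback hung on this chain typically switches on the action code,
 * masking off CPU_TASKS_FROZEN so the suspend/resume variants are
 * handled the same way.  Illustrative sketch only; foobar_cpu_callback,
 * foobar_prepare() and foobar_cleanup() are hypothetical:
 *
 *	static int foobar_cpu_callback(struct notifier_block *nfb,
 *				       unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(foobar_prepare(cpu));
 *		case CPU_DEAD:
 *			foobar_cleanup(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */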
273 */ 274 void clear_tasks_mm_cpumask(int cpu) 275 { 276 struct task_struct *p; 277 278 /* 279 * This function is called after the cpu is taken down and marked 280 * offline, so its not like new tasks will ever get this cpu set in 281 * their mm mask. -- Peter Zijlstra 282 * Thus, we may use rcu_read_lock() here, instead of grabbing 283 * full-fledged tasklist_lock. 284 */ 285 WARN_ON(cpu_online(cpu)); 286 rcu_read_lock(); 287 for_each_process(p) { 288 struct task_struct *t; 289 290 /* 291 * Main thread might exit, but other threads may still have 292 * a valid mm. Find one. 293 */ 294 t = find_lock_task_mm(p); 295 if (!t) 296 continue; 297 cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); 298 task_unlock(t); 299 } 300 rcu_read_unlock(); 301 } 302 303 static inline void check_for_tasks(int dead_cpu) 304 { 305 struct task_struct *g, *p; 306 307 read_lock_irq(&tasklist_lock); 308 do_each_thread(g, p) { 309 if (!p->on_rq) 310 continue; 311 /* 312 * We do the check with unlocked task_rq(p)->lock. 313 * Order the reading to do not warn about a task, 314 * which was running on this cpu in the past, and 315 * it's just been woken on another cpu. 316 */ 317 rmb(); 318 if (task_cpu(p) != dead_cpu) 319 continue; 320 321 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n", 322 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags); 323 } while_each_thread(g, p); 324 read_unlock_irq(&tasklist_lock); 325 } 326 327 struct take_cpu_down_param { 328 unsigned long mod; 329 void *hcpu; 330 }; 331 332 /* Take this CPU down. */ 333 static int __ref take_cpu_down(void *_param) 334 { 335 struct take_cpu_down_param *param = _param; 336 int err; 337 338 /* Ensure this CPU doesn't handle any more interrupts. */ 339 err = __cpu_disable(); 340 if (err < 0) 341 return err; 342 343 cpu_notify(CPU_DYING | param->mod, param->hcpu); 344 /* Park the stopper thread */ 345 kthread_park(current); 346 return 0; 347 } 348 349 /* Requires cpu_add_remove_lock to be held */ 350 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 351 { 352 int err, nr_calls = 0; 353 void *hcpu = (void *)(long)cpu; 354 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 355 struct take_cpu_down_param tcd_param = { 356 .mod = mod, 357 .hcpu = hcpu, 358 }; 359 360 if (num_online_cpus() == 1) 361 return -EBUSY; 362 363 if (!cpu_online(cpu)) 364 return -EINVAL; 365 366 cpu_hotplug_begin(); 367 368 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); 369 if (err) { 370 nr_calls--; 371 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); 372 pr_warn("%s: attempt to take down CPU %u failed\n", 373 __func__, cpu); 374 goto out_release; 375 } 376 377 /* 378 * By now we've cleared cpu_active_mask, wait for all preempt-disabled 379 * and RCU users of this state to go away such that all new such users 380 * will observe it. 381 * 382 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might 383 * not imply sync_sched(), so explicitly call both. 384 * 385 * Do sync before park smpboot threads to take care the rcu boost case. 386 */ 387 #ifdef CONFIG_PREEMPT 388 synchronize_sched(); 389 #endif 390 synchronize_rcu(); 391 392 smpboot_park_threads(cpu); 393 394 /* 395 * So now all preempt/rcu users must observe !cpu_active(). 396 */ 397 398 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); 399 if (err) { 400 /* CPU didn't die: tell everyone. Can't complain. 
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
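/*
 * Bring-up mirrors the teardown path: CPU_UP_PREPARE notifiers, the
 * arch-specific __cpu_up() (during which the new CPU invokes
 * notify_cpu_starting() to send CPU_STARTING for itself), unparking of
 * the per-cpu threads, and finally CPU_ONLINE.
 */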
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
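/*
 * For context (a sketch of the callers, which live in kernel/power/):
 * the suspend/hibernate core calls disable_nonboot_cpus() once tasks
 * are frozen and devices suspended, taking every CPU but the boot CPU
 * offline, and calls enable_nonboot_cpus() on the resume path to bring
 * the CPUs recorded in frozen_cpus back up.
 */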
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */
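/*
 * For reference, the lookup itself lives in include/linux/cpumask.h;
 * roughly, cpumask_of(cpu) does:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * i.e. it picks the row whose first word has bit (cpu % BITS_PER_LONG)
 * set and steps back cpu / BITS_PER_LONG words, so that the single set
 * bit lands at position 'cpu' in the resulting mask.
 */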
703 */ 704 705 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 706 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) 707 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 708 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 709 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 710 711 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 712 713 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 714 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 715 #if BITS_PER_LONG > 32 716 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 717 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 718 #endif 719 }; 720 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 721 722 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 723 EXPORT_SYMBOL(cpu_all_bits); 724 725 #ifdef CONFIG_INIT_ALL_POSSIBLE 726 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 727 = CPU_BITS_ALL; 728 #else 729 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 730 #endif 731 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 732 EXPORT_SYMBOL(cpu_possible_mask); 733 734 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 735 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 736 EXPORT_SYMBOL(cpu_online_mask); 737 738 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 739 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 740 EXPORT_SYMBOL(cpu_present_mask); 741 742 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 743 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 744 EXPORT_SYMBOL(cpu_active_mask); 745 746 void set_cpu_possible(unsigned int cpu, bool possible) 747 { 748 if (possible) 749 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 750 else 751 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 752 } 753 754 void set_cpu_present(unsigned int cpu, bool present) 755 { 756 if (present) 757 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 758 else 759 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 760 } 761 762 void set_cpu_online(unsigned int cpu, bool online) 763 { 764 if (online) { 765 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 766 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 767 } else { 768 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 769 } 770 } 771 772 void set_cpu_active(unsigned int cpu, bool active) 773 { 774 if (active) 775 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 776 else 777 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 778 } 779 780 void init_cpu_present(const struct cpumask *src) 781 { 782 cpumask_copy(to_cpumask(cpu_present_bits), src); 783 } 784 785 void init_cpu_possible(const struct cpumask *src) 786 { 787 cpumask_copy(to_cpumask(cpu_possible_bits), src); 788 } 789 790 void init_cpu_online(const struct cpumask *src) 791 { 792 cpumask_copy(to_cpumask(cpu_online_bits), src); 793 } 794