/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++;		/* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
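/*
 * Illustrative usage sketch: a reader that must see a stable set of
 * online CPUs brackets its work with the pair above, e.g.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 *
 * (do_per_cpu_work() is a made-up helper, shown only for illustration.)
 * No CPU can be brought up or torn down between the two calls.
 */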
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
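/*
 * Illustrative sketch of the notifier interface: a subsystem that needs
 * to track CPUs coming and going supplies a callback and hands it to
 * register_cpu_notifier().  foo_cpu_callback(), foo_online() and
 * foo_offline() below are made-up names, shown only to illustrate the
 * calling convention:
 *
 *	static int foo_cpu_callback(struct notifier_block *nfb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			foo_online(cpu);
 *			break;
 *		case CPU_DEAD:
 *			foo_offline(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 * The CPU_TASKS_FROZEN bit is masked off so that the plain and _FROZEN
 * variants of each event are handled the same way.
 */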
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */
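/*
 * For orientation, the teardown sequence implemented above, assuming no
 * notifier vetoes the operation:
 *
 *	cpu_down()
 *	  cpu_maps_update_begin()	    take cpu_add_remove_lock
 *	  _cpu_down()
 *	    cpu_hotplug_begin()		    wait until all readers are gone
 *	    CPU_DOWN_PREPARE notifiers	    may still veto the operation
 *	    smpboot_park_threads()	    park the per-cpu kthreads
 *	    __stop_machine(take_cpu_down)   __cpu_disable() + CPU_DYING
 *	    __cpu_die()			    arch code waits for the CPU
 *	    CPU_DEAD notifiers		    too late to complain
 *	    cpu_hotplug_done()
 *	    CPU_POST_DEAD notifiers	    hotplug lock already dropped
 *	  cpu_maps_update_done()
 *
 * From userspace, this path is normally reached by writing 0 to
 * /sys/devices/system/cpu/cpuN/online.
 */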
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call the CPU_ONLINE notifiers. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
			"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk(KERN_INFO "Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
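/*
 * Illustrative sketch: the suspend/hibernate core is the expected caller
 * of the pair above, roughly along these lines (simplified; the real
 * sequence lives in kernel/power/):
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... enter the sleep state on the sole remaining CPU ...
 *	}
 *	enable_nonboot_cpus();
 *
 * Every CPU that was successfully taken down is remembered in
 * frozen_cpus, so only those CPUs are brought back up afterwards.
 */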
/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen.  Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'.  And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete.  Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback must run with a higher priority than the
	 * x86 bsp_pm_callback, which relies on cpu_hotplug_pm_callback
	 * having already disabled CPU hotplug, to avoid a hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each nr in 0..NR_CPUS-1, the NR_CPUS-bit bitmap whose
 * only set bit is nr, while storing just BITS_PER_LONG+1 rows.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
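/*
 * How the compression works (sketch; the authoritative helper lives in
 * include/linux/cpumask.h): only word 0 of row nr+1 is non-zero, and row 0
 * is all zeroes, so for an arbitrary cpu the mask is found by picking the
 * row for cpu % BITS_PER_LONG and stepping the pointer *back* into the
 * zero words of the rows above it:
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 *
 * The words preceding a row's word 0 all belong to lower rows and are
 * zero, so the resulting window holds exactly one set bit: bit cpu.
 */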
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
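/*
 * Illustrative sketch: architecture setup code seeds these masks early in
 * boot, typically along the lines of
 *
 *	init_cpu_possible(cpumask_of(0));
 *	init_cpu_present(cpumask_of(0));
 *	set_cpu_online(0, true);
 *
 * before the remaining CPUs are enumerated, after which cpu_up() drives
 * each present CPU through _cpu_up() above.
 */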