/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
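/*
 * Usage sketch (illustrative only, compiled out; the helper name below is
 * made up): a typical reader side wraps any walk of cpu_online_mask in
 * get_online_cpus()/put_online_cpus() so that no CPU can come or go in
 * the middle of the traversal.
 */
#if 0
static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, count = 0;

	get_online_cpus();		/* bump refcount, block hotplug writers */
	for_each_online_cpu(cpu)	/* cpu_online_mask is stable in here */
		count++;
	put_online_cpus();		/* drop refcount, maybe wake a writer */

	return count;
}
#endif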
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};
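/*
 * Usage sketch (illustrative only, compiled out; callback and variable
 * names are made up): a subsystem that must react to CPUs coming and going
 * hangs a notifier_block on cpu_chain with register_cpu_notifier() and
 * switches on the action, masking off CPU_TASKS_FROZEN to treat the
 * suspend/resume variants like the regular ones.
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state; an errno here aborts the bring-up */
		break;
	case CPU_ONLINE:
		pr_info("example: cpu %u is online\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		/* last chance to veto the offline via notifier_from_errno() */
		break;
	case CPU_DEAD:
		pr_info("example: cpu %u is dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};
/* registered once at init time: register_cpu_notifier(&example_cpu_nb); */
#endif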
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
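/*
 * Usage sketch (illustrative only, compiled out; the function name is made
 * up): cpu_down() and cpu_up() are the exported entry points, used for
 * instance by the sysfs "online" attribute.  A hypothetical in-kernel
 * caller cycling a CPU would look roughly like this.
 */
#if 0
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);	/* DOWN_PREPARE, stop_machine, DYING, DEAD */
	if (ret)
		return ret;	/* e.g. -EBUSY if it is the last online CPU */

	return cpu_up(cpu);	/* UP_PREPARE, arch __cpu_up(), then ONLINE */
}
#endif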
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
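/*
 * Usage sketch (illustrative only, compiled out; the function name and the
 * surrounding steps are only indicative): the suspend/hibernate core is the
 * intended caller of disable_nonboot_cpus()/enable_nonboot_cpus().  It takes
 * every CPU but the boot CPU down with tasks_frozen = 1 before entering the
 * sleep state and brings the CPUs recorded in frozen_cpus back afterwards.
 */
#if 0
static int example_suspend_enter(void)
{
	int error;

	error = disable_nonboot_cpus();	/* only the boot CPU stays online */
	if (error)
		return error;

	/* ... the platform enters the sleep state and resumes here ... */

	enable_nonboot_cpus();		/* restore CPUs noted in frozen_cpus */
	return 0;
}
#endif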
/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen.  Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'.  And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete.  Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
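/*
 * Usage sketch (illustrative only, compiled out): roughly what an
 * architecture's secondary-CPU startup path does with the helpers above.
 * The exact sequence is arch-specific; the ordering shown is the one the
 * comment on notify_cpu_starting() requires, i.e. notifiers run before
 * interrupts are enabled and before the boot CPU returns from __cpu_up().
 */
#if 0
static void example_secondary_start(unsigned int cpu)
{
	/* ... per-arch MMU, per-cpu area and timer setup ... */
	notify_cpu_starting(cpu);	/* CPU_STARTING(_FROZEN) callbacks */
	set_cpu_online(cpu, true);	/* now visible in cpu_online_mask */
	local_irq_enable();
	/* ... fall into the idle loop ... */
}
#endif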
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
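/*
 * For reference (illustrative only, compiled out): cpumask_of(cpu) resolves
 * to a pointer into cpu_bit_bitmap[] above.  Row 1 + cpu % BITS_PER_LONG
 * has exactly bit cpu % BITS_PER_LONG set in its first long; stepping the
 * pointer back by cpu / BITS_PER_LONG longs then lines that word up with
 * the right position of an NR_CPUS-bit mask, and the empty row 0 is what
 * makes backing up safe.  This mirrors get_cpu_mask() in <linux/cpumask.h>,
 * shown here only as an illustration.
 */
#if 0
static const struct cpumask *example_cpumask_of(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif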