/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
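/*
 * Illustrative reader-side usage (a sketch, not part of this file; the
 * per-cpu helper named below is hypothetical).  Code that must see a
 * stable cpu_online_mask brackets its work with the refcounting pair
 * above:
 *
 *	unsigned int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		flush_pending_on(cpu);	(hypothetical per-cpu work)
 *	put_online_cpus();
 *
 * Between the two calls no writer can get past cpu_hotplug_begin(), so
 * no CPU can be unplugged while the per-cpu data is being touched.
 */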
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
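/*
 * Illustrative notifier usage (a sketch; the foo_* names are hypothetical).
 * A typical callback masks off CPU_TASKS_FROZEN so the suspend-time
 * variants are handled like their regular counterparts, and reports
 * failures from the *_PREPARE stage via notifier_from_errno():
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(foo_alloc_for(cpu));
 *		case CPU_DEAD:
 *			foo_free_for(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */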
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (p->utime || p->stime))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }
        smpboot_park_threads(cpu);

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
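/*
 * Illustrative caller (a sketch): the usual route into cpu_down() is the
 * sysfs "online" attribute.  Writing 0 to
 * /sys/devices/system/cpu/cpuN/online ends up doing roughly:
 *
 *	err = cpu_down(cpu);
 *	if (!err)
 *		kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
 */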
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef	CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
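/*
 * Illustrative suspend-side usage (a sketch of the PM core's sequence):
 * the non-boot CPUs are taken down after tasks are frozen and brought
 * back before they are thawed, with frozen_cpus remembering which ones
 * to restore:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error)
 *		... enter the sleep state on the boot CPU ...
 *	enable_nonboot_cpus();
 */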
/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen.  Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable_before_freeze();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable_after_thaw();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
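/*
 * How cpumask_of() uses the table above (a sketch of get_cpu_mask() from
 * <linux/cpumask.h> of the same vintage): row 1 + cpu % BITS_PER_LONG has
 * exactly bit (cpu % BITS_PER_LONG) set in its first word, and stepping
 * the pointer back by cpu / BITS_PER_LONG words "backs into" the all-zero
 * words of the preceding rows (row 0 is left empty for this reason),
 * yielding a constant mask whose only set bit is 'cpu':
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */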
634 */ 635 636 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 637 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) 638 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 639 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 640 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 641 642 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 643 644 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 645 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 646 #if BITS_PER_LONG > 32 647 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 648 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 649 #endif 650 }; 651 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 652 653 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 654 EXPORT_SYMBOL(cpu_all_bits); 655 656 #ifdef CONFIG_INIT_ALL_POSSIBLE 657 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 658 = CPU_BITS_ALL; 659 #else 660 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 661 #endif 662 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 663 EXPORT_SYMBOL(cpu_possible_mask); 664 665 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 666 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 667 EXPORT_SYMBOL(cpu_online_mask); 668 669 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 670 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 671 EXPORT_SYMBOL(cpu_present_mask); 672 673 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 674 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 675 EXPORT_SYMBOL(cpu_active_mask); 676 677 void set_cpu_possible(unsigned int cpu, bool possible) 678 { 679 if (possible) 680 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 681 else 682 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 683 } 684 685 void set_cpu_present(unsigned int cpu, bool present) 686 { 687 if (present) 688 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 689 else 690 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 691 } 692 693 void set_cpu_online(unsigned int cpu, bool online) 694 { 695 if (online) 696 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 697 else 698 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 699 } 700 701 void set_cpu_active(unsigned int cpu, bool active) 702 { 703 if (active) 704 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 705 else 706 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 707 } 708 709 void init_cpu_present(const struct cpumask *src) 710 { 711 cpumask_copy(to_cpumask(cpu_present_bits), src); 712 } 713 714 void init_cpu_possible(const struct cpumask *src) 715 { 716 cpumask_copy(to_cpumask(cpu_possible_bits), src); 717 } 718 719 void init_cpu_online(const struct cpumask *src) 720 { 721 cpumask_copy(to_cpumask(cpu_online_bits), src); 722 } 723