/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used to serialize updates to
 * cpu_online_mask and cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
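/*
 * Illustrative sketch, not part of the original file: a typical
 * reader-side section that a writer in cpu_hotplug_begin() below must
 * wait out.  "report_online_cpus" is a hypothetical caller.
 *
 *	static void report_online_cpus(void)
 *	{
 *		int cpu;
 *
 *		get_online_cpus();	(may sleep; holds off hotplug)
 *		for_each_online_cpu(cpu)
 *			printk(KERN_INFO "cpu%d is online\n", cpu);
 *		put_online_cpus();	(last reader wakes the writer)
 *	}
 */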
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif /* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
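/*
 * Illustrative sketch, not part of the original file: a minimal hotplug
 * callback wired up through register_cpu_notifier().  The "foo_" names
 * are hypothetical.
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return foo_prepare(cpu) ? NOTIFY_BAD : NOTIFY_OK;
 *		case CPU_DEAD:
 *			foo_cleanup(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */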
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (p->utime || p->stime))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */
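/*
 * Usage note, not part of the original file: cpu_down() and cpu_up() sit
 * behind the sysfs "online" attribute, so from userspace a CPU is
 * typically offlined and brought back with:
 *
 *	# echo 0 > /sys/devices/system/cpu/cpu1/online
 *	# echo 1 > /sys/devices/system/cpu/cpu1/online
 */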
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call the notifiers with CPU_ONLINE. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);
        arch_disable_nonboot_cpus_begin();

        printk(KERN_INFO "Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        arch_disable_nonboot_cpus_end();

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
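/*
 * Illustrative sketch, not part of the original file: how the suspend
 * core is expected to pair the two helpers above.  Error handling is
 * elided; the real call sites live under kernel/power/.
 *
 *	error = disable_nonboot_cpus();		(only the boot CPU stays up)
 *	if (!error)
 *		... enter the sleep state ...
 *	enable_nonboot_cpus();			(revives the CPUs recorded
 *						 in frozen_cpus)
 */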
static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

/*
 * While CPU hotplug notification callbacks are being executed, the
 * frozen-or-not state of the system's tasks, as reported by the
 * notification, must remain unchanged *throughout the duration* of the
 * callbacks.  Hence we need to prevent the freezer from racing with
 * regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable_before_freeze();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable_after_thaw();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each nr, the NR_CPUS-bit binary value of 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address of a CPU
 * mask value that has only a single bit set.
 */
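/*
 * Illustrative sketch, not part of the original file: cpumask_of(cpu)
 * resolves to a const pointer into the table below, so no mask has to be
 * built at run time.  For example:
 *
 *	const struct cpumask *mask = cpumask_of(3);	(only bit 3 set)
 *	__stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 *						(as _cpu_down() above does)
 */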
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}
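/*
 * Illustrative sketch, not part of the original file: architecture setup
 * code typically drives the accessors above during boot.  Exact call
 * sites vary by architecture.
 *
 *	init_cpu_possible(cpumask_of(0));	(start from the boot CPU)
 *	set_cpu_present(1, true);		(firmware reported CPU 1)
 *	set_cpu_online(smp_processor_id(), true);
 *						(from secondary startup)
 */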