1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/reboot.c 4 * 5 * Copyright (C) 2013 Linus Torvalds 6 */ 7 8 #define pr_fmt(fmt) "reboot: " fmt 9 10 #include <linux/atomic.h> 11 #include <linux/ctype.h> 12 #include <linux/export.h> 13 #include <linux/kexec.h> 14 #include <linux/kmod.h> 15 #include <linux/kmsg_dump.h> 16 #include <linux/reboot.h> 17 #include <linux/suspend.h> 18 #include <linux/syscalls.h> 19 #include <linux/syscore_ops.h> 20 #include <linux/uaccess.h> 21 22 /* 23 * this indicates whether you can reboot with ctrl-alt-del: the default is yes 24 */ 25 26 static int C_A_D = 1; 27 struct pid *cad_pid; 28 EXPORT_SYMBOL(cad_pid); 29 30 #if defined(CONFIG_ARM) 31 #define DEFAULT_REBOOT_MODE = REBOOT_HARD 32 #else 33 #define DEFAULT_REBOOT_MODE 34 #endif 35 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; 36 EXPORT_SYMBOL_GPL(reboot_mode); 37 enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED; 38 39 /* 40 * This variable is used privately to keep track of whether or not 41 * reboot_type is still set to its default value (i.e., reboot= hasn't 42 * been set on the command line). This is needed so that we can 43 * suppress DMI scanning for reboot quirks. Without it, it's 44 * impossible to override a faulty reboot quirk without recompiling. 45 */ 46 int reboot_default = 1; 47 int reboot_cpu; 48 enum reboot_type reboot_type = BOOT_ACPI; 49 int reboot_force; 50 51 struct sys_off_handler { 52 struct notifier_block nb; 53 int (*sys_off_cb)(struct sys_off_data *data); 54 void *cb_data; 55 enum sys_off_mode mode; 56 bool blocking; 57 void *list; 58 }; 59 60 /* 61 * Temporary stub that prevents linkage failure while we're in process 62 * of removing all uses of legacy pm_power_off() around the kernel. 63 */ 64 void __weak (*pm_power_off)(void); 65 66 /** 67 * emergency_restart - reboot the system 68 * 69 * Without shutting down any hardware or taking any locks 70 * reboot the system. This is called when we know we are in 71 * trouble so this is our best effort to reboot. This is 72 * safe to call in interrupt context. 73 */ 74 void emergency_restart(void) 75 { 76 kmsg_dump(KMSG_DUMP_EMERG); 77 machine_emergency_restart(); 78 } 79 EXPORT_SYMBOL_GPL(emergency_restart); 80 81 void kernel_restart_prepare(char *cmd) 82 { 83 blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); 84 system_state = SYSTEM_RESTART; 85 try_block_console_kthreads(10000); 86 usermodehelper_disable(); 87 device_shutdown(); 88 } 89 90 /** 91 * register_reboot_notifier - Register function to be called at reboot time 92 * @nb: Info about notifier function to be called 93 * 94 * Registers a function with the list of functions 95 * to be called at reboot time. 96 * 97 * Currently always returns zero, as blocking_notifier_chain_register() 98 * always returns zero. 99 */ 100 int register_reboot_notifier(struct notifier_block *nb) 101 { 102 return blocking_notifier_chain_register(&reboot_notifier_list, nb); 103 } 104 EXPORT_SYMBOL(register_reboot_notifier); 105 106 /** 107 * unregister_reboot_notifier - Unregister previously registered reboot notifier 108 * @nb: Hook to be unregistered 109 * 110 * Unregisters a previously registered reboot 111 * notifier function. 112 * 113 * Returns zero on success, or %-ENOENT on failure. 114 */ 115 int unregister_reboot_notifier(struct notifier_block *nb) 116 { 117 return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); 118 } 119 EXPORT_SYMBOL(unregister_reboot_notifier); 120 121 static void devm_unregister_reboot_notifier(struct device *dev, void *res) 122 { 123 WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res)); 124 } 125 126 int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb) 127 { 128 struct notifier_block **rcnb; 129 int ret; 130 131 rcnb = devres_alloc(devm_unregister_reboot_notifier, 132 sizeof(*rcnb), GFP_KERNEL); 133 if (!rcnb) 134 return -ENOMEM; 135 136 ret = register_reboot_notifier(nb); 137 if (!ret) { 138 *rcnb = nb; 139 devres_add(dev, rcnb); 140 } else { 141 devres_free(rcnb); 142 } 143 144 return ret; 145 } 146 EXPORT_SYMBOL(devm_register_reboot_notifier); 147 148 /* 149 * Notifier list for kernel code which wants to be called 150 * to restart the system. 151 */ 152 static ATOMIC_NOTIFIER_HEAD(restart_handler_list); 153 154 /** 155 * register_restart_handler - Register function to be called to reset 156 * the system 157 * @nb: Info about handler function to be called 158 * @nb->priority: Handler priority. Handlers should follow the 159 * following guidelines for setting priorities. 160 * 0: Restart handler of last resort, 161 * with limited restart capabilities 162 * 128: Default restart handler; use if no other 163 * restart handler is expected to be available, 164 * and/or if restart functionality is 165 * sufficient to restart the entire system 166 * 255: Highest priority restart handler, will 167 * preempt all other restart handlers 168 * 169 * Registers a function with code to be called to restart the 170 * system. 171 * 172 * Registered functions will be called from machine_restart as last 173 * step of the restart sequence (if the architecture specific 174 * machine_restart function calls do_kernel_restart - see below 175 * for details). 176 * Registered functions are expected to restart the system immediately. 177 * If more than one function is registered, the restart handler priority 178 * selects which function will be called first. 179 * 180 * Restart handlers are expected to be registered from non-architecture 181 * code, typically from drivers. A typical use case would be a system 182 * where restart functionality is provided through a watchdog. Multiple 183 * restart handlers may exist; for example, one restart handler might 184 * restart the entire system, while another only restarts the CPU. 185 * In such cases, the restart handler which only restarts part of the 186 * hardware is expected to register with low priority to ensure that 187 * it only runs if no other means to restart the system is available. 188 * 189 * Currently always returns zero, as atomic_notifier_chain_register() 190 * always returns zero. 191 */ 192 int register_restart_handler(struct notifier_block *nb) 193 { 194 return atomic_notifier_chain_register(&restart_handler_list, nb); 195 } 196 EXPORT_SYMBOL(register_restart_handler); 197 198 /** 199 * unregister_restart_handler - Unregister previously registered 200 * restart handler 201 * @nb: Hook to be unregistered 202 * 203 * Unregisters a previously registered restart handler function. 204 * 205 * Returns zero on success, or %-ENOENT on failure. 206 */ 207 int unregister_restart_handler(struct notifier_block *nb) 208 { 209 return atomic_notifier_chain_unregister(&restart_handler_list, nb); 210 } 211 EXPORT_SYMBOL(unregister_restart_handler); 212 213 /** 214 * do_kernel_restart - Execute kernel restart handler call chain 215 * 216 * Calls functions registered with register_restart_handler. 217 * 218 * Expected to be called from machine_restart as last step of the restart 219 * sequence. 220 * 221 * Restarts the system immediately if a restart handler function has been 222 * registered. Otherwise does nothing. 223 */ 224 void do_kernel_restart(char *cmd) 225 { 226 atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd); 227 } 228 229 void migrate_to_reboot_cpu(void) 230 { 231 /* The boot cpu is always logical cpu 0 */ 232 int cpu = reboot_cpu; 233 234 cpu_hotplug_disable(); 235 236 /* Make certain the cpu I'm about to reboot on is online */ 237 if (!cpu_online(cpu)) 238 cpu = cpumask_first(cpu_online_mask); 239 240 /* Prevent races with other tasks migrating this task */ 241 current->flags |= PF_NO_SETAFFINITY; 242 243 /* Make certain I only run on the appropriate processor */ 244 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 245 } 246 247 /** 248 * kernel_restart - reboot the system 249 * @cmd: pointer to buffer containing command to execute for restart 250 * or %NULL 251 * 252 * Shutdown everything and perform a clean reboot. 253 * This is not safe to call in interrupt context. 254 */ 255 void kernel_restart(char *cmd) 256 { 257 kernel_restart_prepare(cmd); 258 migrate_to_reboot_cpu(); 259 syscore_shutdown(); 260 if (!cmd) 261 pr_emerg("Restarting system\n"); 262 else 263 pr_emerg("Restarting system with command '%s'\n", cmd); 264 kmsg_dump(KMSG_DUMP_SHUTDOWN); 265 machine_restart(cmd); 266 } 267 EXPORT_SYMBOL_GPL(kernel_restart); 268 269 static void kernel_shutdown_prepare(enum system_states state) 270 { 271 blocking_notifier_call_chain(&reboot_notifier_list, 272 (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL); 273 system_state = state; 274 try_block_console_kthreads(10000); 275 usermodehelper_disable(); 276 device_shutdown(); 277 } 278 /** 279 * kernel_halt - halt the system 280 * 281 * Shutdown everything and perform a clean system halt. 282 */ 283 void kernel_halt(void) 284 { 285 kernel_shutdown_prepare(SYSTEM_HALT); 286 migrate_to_reboot_cpu(); 287 syscore_shutdown(); 288 pr_emerg("System halted\n"); 289 kmsg_dump(KMSG_DUMP_SHUTDOWN); 290 machine_halt(); 291 } 292 EXPORT_SYMBOL_GPL(kernel_halt); 293 294 /* 295 * Notifier list for kernel code which wants to be called 296 * to prepare system for power off. 297 */ 298 static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list); 299 300 /* 301 * Notifier list for kernel code which wants to be called 302 * to power off system. 303 */ 304 static ATOMIC_NOTIFIER_HEAD(power_off_handler_list); 305 306 static int sys_off_notify(struct notifier_block *nb, 307 unsigned long mode, void *cmd) 308 { 309 struct sys_off_handler *handler; 310 struct sys_off_data data = {}; 311 312 handler = container_of(nb, struct sys_off_handler, nb); 313 data.cb_data = handler->cb_data; 314 data.mode = mode; 315 data.cmd = cmd; 316 317 return handler->sys_off_cb(&data); 318 } 319 320 static struct sys_off_handler platform_sys_off_handler; 321 322 static struct sys_off_handler *alloc_sys_off_handler(int priority) 323 { 324 struct sys_off_handler *handler; 325 gfp_t flags; 326 327 /* 328 * Platforms like m68k can't allocate sys_off handler dynamically 329 * at the early boot time because memory allocator isn't available yet. 330 */ 331 if (priority == SYS_OFF_PRIO_PLATFORM) { 332 handler = &platform_sys_off_handler; 333 if (handler->cb_data) 334 return ERR_PTR(-EBUSY); 335 } else { 336 if (system_state > SYSTEM_RUNNING) 337 flags = GFP_ATOMIC; 338 else 339 flags = GFP_KERNEL; 340 341 handler = kzalloc(sizeof(*handler), flags); 342 if (!handler) 343 return ERR_PTR(-ENOMEM); 344 } 345 346 return handler; 347 } 348 349 static void free_sys_off_handler(struct sys_off_handler *handler) 350 { 351 if (handler == &platform_sys_off_handler) 352 memset(handler, 0, sizeof(*handler)); 353 else 354 kfree(handler); 355 } 356 357 /** 358 * register_sys_off_handler - Register sys-off handler 359 * @mode: Sys-off mode 360 * @priority: Handler priority 361 * @callback: Callback function 362 * @cb_data: Callback argument 363 * 364 * Registers system power-off or restart handler that will be invoked 365 * at the step corresponding to the given sys-off mode. Handler's callback 366 * should return NOTIFY_DONE to permit execution of the next handler in 367 * the call chain or NOTIFY_STOP to break the chain (in error case for 368 * example). 369 * 370 * Multiple handlers can be registered at the default priority level. 371 * 372 * Only one handler can be registered at the non-default priority level, 373 * otherwise ERR_PTR(-EBUSY) is returned. 374 * 375 * Returns a new instance of struct sys_off_handler on success, or 376 * an ERR_PTR()-encoded error code otherwise. 377 */ 378 struct sys_off_handler * 379 register_sys_off_handler(enum sys_off_mode mode, 380 int priority, 381 int (*callback)(struct sys_off_data *data), 382 void *cb_data) 383 { 384 struct sys_off_handler *handler; 385 int err; 386 387 handler = alloc_sys_off_handler(priority); 388 if (IS_ERR(handler)) 389 return handler; 390 391 switch (mode) { 392 case SYS_OFF_MODE_POWER_OFF_PREPARE: 393 handler->list = &power_off_prep_handler_list; 394 handler->blocking = true; 395 break; 396 397 case SYS_OFF_MODE_POWER_OFF: 398 handler->list = &power_off_handler_list; 399 break; 400 401 case SYS_OFF_MODE_RESTART: 402 handler->list = &restart_handler_list; 403 break; 404 405 default: 406 free_sys_off_handler(handler); 407 return ERR_PTR(-EINVAL); 408 } 409 410 handler->nb.notifier_call = sys_off_notify; 411 handler->nb.priority = priority; 412 handler->sys_off_cb = callback; 413 handler->cb_data = cb_data; 414 handler->mode = mode; 415 416 if (handler->blocking) { 417 if (priority == SYS_OFF_PRIO_DEFAULT) 418 err = blocking_notifier_chain_register(handler->list, 419 &handler->nb); 420 else 421 err = blocking_notifier_chain_register_unique_prio(handler->list, 422 &handler->nb); 423 } else { 424 if (priority == SYS_OFF_PRIO_DEFAULT) 425 err = atomic_notifier_chain_register(handler->list, 426 &handler->nb); 427 else 428 err = atomic_notifier_chain_register_unique_prio(handler->list, 429 &handler->nb); 430 } 431 432 if (err) { 433 free_sys_off_handler(handler); 434 return ERR_PTR(err); 435 } 436 437 return handler; 438 } 439 EXPORT_SYMBOL_GPL(register_sys_off_handler); 440 441 /** 442 * unregister_sys_off_handler - Unregister sys-off handler 443 * @handler: Sys-off handler 444 * 445 * Unregisters given sys-off handler. 446 */ 447 void unregister_sys_off_handler(struct sys_off_handler *handler) 448 { 449 int err; 450 451 if (IS_ERR_OR_NULL(handler)) 452 return; 453 454 if (handler->blocking) 455 err = blocking_notifier_chain_unregister(handler->list, 456 &handler->nb); 457 else 458 err = atomic_notifier_chain_unregister(handler->list, 459 &handler->nb); 460 461 /* sanity check, shall never happen */ 462 WARN_ON(err); 463 464 free_sys_off_handler(handler); 465 } 466 EXPORT_SYMBOL_GPL(unregister_sys_off_handler); 467 468 static void devm_unregister_sys_off_handler(void *data) 469 { 470 struct sys_off_handler *handler = data; 471 472 unregister_sys_off_handler(handler); 473 } 474 475 /** 476 * devm_register_sys_off_handler - Register sys-off handler 477 * @dev: Device that registers handler 478 * @mode: Sys-off mode 479 * @priority: Handler priority 480 * @callback: Callback function 481 * @cb_data: Callback argument 482 * 483 * Registers resource-managed sys-off handler. 484 * 485 * Returns zero on success, or error code on failure. 486 */ 487 int devm_register_sys_off_handler(struct device *dev, 488 enum sys_off_mode mode, 489 int priority, 490 int (*callback)(struct sys_off_data *data), 491 void *cb_data) 492 { 493 struct sys_off_handler *handler; 494 495 handler = register_sys_off_handler(mode, priority, callback, cb_data); 496 if (IS_ERR(handler)) 497 return PTR_ERR(handler); 498 499 return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler, 500 handler); 501 } 502 EXPORT_SYMBOL_GPL(devm_register_sys_off_handler); 503 504 /** 505 * devm_register_power_off_handler - Register power-off handler 506 * @dev: Device that registers callback 507 * @callback: Callback function 508 * @cb_data: Callback's argument 509 * 510 * Registers resource-managed sys-off handler with a default priority 511 * and using power-off mode. 512 * 513 * Returns zero on success, or error code on failure. 514 */ 515 int devm_register_power_off_handler(struct device *dev, 516 int (*callback)(struct sys_off_data *data), 517 void *cb_data) 518 { 519 return devm_register_sys_off_handler(dev, 520 SYS_OFF_MODE_POWER_OFF, 521 SYS_OFF_PRIO_DEFAULT, 522 callback, cb_data); 523 } 524 EXPORT_SYMBOL_GPL(devm_register_power_off_handler); 525 526 /** 527 * devm_register_restart_handler - Register restart handler 528 * @dev: Device that registers callback 529 * @callback: Callback function 530 * @cb_data: Callback's argument 531 * 532 * Registers resource-managed sys-off handler with a default priority 533 * and using restart mode. 534 * 535 * Returns zero on success, or error code on failure. 536 */ 537 int devm_register_restart_handler(struct device *dev, 538 int (*callback)(struct sys_off_data *data), 539 void *cb_data) 540 { 541 return devm_register_sys_off_handler(dev, 542 SYS_OFF_MODE_RESTART, 543 SYS_OFF_PRIO_DEFAULT, 544 callback, cb_data); 545 } 546 EXPORT_SYMBOL_GPL(devm_register_restart_handler); 547 548 static struct sys_off_handler *platform_power_off_handler; 549 550 static int platform_power_off_notify(struct sys_off_data *data) 551 { 552 void (*platform_power_power_off_cb)(void) = data->cb_data; 553 554 platform_power_power_off_cb(); 555 556 return NOTIFY_DONE; 557 } 558 559 /** 560 * register_platform_power_off - Register platform-level power-off callback 561 * @power_off: Power-off callback 562 * 563 * Registers power-off callback that will be called as last step 564 * of the power-off sequence. This callback is expected to be invoked 565 * for the last resort. Only one platform power-off callback is allowed 566 * to be registered at a time. 567 * 568 * Returns zero on success, or error code on failure. 569 */ 570 int register_platform_power_off(void (*power_off)(void)) 571 { 572 struct sys_off_handler *handler; 573 574 handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, 575 SYS_OFF_PRIO_PLATFORM, 576 platform_power_off_notify, 577 power_off); 578 if (IS_ERR(handler)) 579 return PTR_ERR(handler); 580 581 platform_power_off_handler = handler; 582 583 return 0; 584 } 585 EXPORT_SYMBOL_GPL(register_platform_power_off); 586 587 /** 588 * unregister_platform_power_off - Unregister platform-level power-off callback 589 * @power_off: Power-off callback 590 * 591 * Unregisters previously registered platform power-off callback. 592 */ 593 void unregister_platform_power_off(void (*power_off)(void)) 594 { 595 if (platform_power_off_handler && 596 platform_power_off_handler->cb_data == power_off) { 597 unregister_sys_off_handler(platform_power_off_handler); 598 platform_power_off_handler = NULL; 599 } 600 } 601 EXPORT_SYMBOL_GPL(unregister_platform_power_off); 602 603 static int legacy_pm_power_off(struct sys_off_data *data) 604 { 605 if (pm_power_off) 606 pm_power_off(); 607 608 return NOTIFY_DONE; 609 } 610 611 static void do_kernel_power_off_prepare(void) 612 { 613 blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL); 614 } 615 616 /** 617 * do_kernel_power_off - Execute kernel power-off handler call chain 618 * 619 * Expected to be called as last step of the power-off sequence. 620 * 621 * Powers off the system immediately if a power-off handler function has 622 * been registered. Otherwise does nothing. 623 */ 624 void do_kernel_power_off(void) 625 { 626 struct sys_off_handler *sys_off = NULL; 627 628 /* 629 * Register sys-off handlers for legacy PM callback. This allows 630 * legacy PM callbacks temporary co-exist with the new sys-off API. 631 * 632 * TODO: Remove legacy handlers once all legacy PM users will be 633 * switched to the sys-off based APIs. 634 */ 635 if (pm_power_off) 636 sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, 637 SYS_OFF_PRIO_DEFAULT, 638 legacy_pm_power_off, NULL); 639 640 atomic_notifier_call_chain(&power_off_handler_list, 0, NULL); 641 642 unregister_sys_off_handler(sys_off); 643 } 644 645 /** 646 * kernel_can_power_off - check whether system can be powered off 647 * 648 * Returns true if power-off handler is registered and system can be 649 * powered off, false otherwise. 650 */ 651 bool kernel_can_power_off(void) 652 { 653 return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) || 654 pm_power_off; 655 } 656 EXPORT_SYMBOL_GPL(kernel_can_power_off); 657 658 /** 659 * kernel_power_off - power_off the system 660 * 661 * Shutdown everything and perform a clean system power_off. 662 */ 663 void kernel_power_off(void) 664 { 665 kernel_shutdown_prepare(SYSTEM_POWER_OFF); 666 do_kernel_power_off_prepare(); 667 migrate_to_reboot_cpu(); 668 syscore_shutdown(); 669 pr_emerg("Power down\n"); 670 kmsg_dump(KMSG_DUMP_SHUTDOWN); 671 machine_power_off(); 672 } 673 EXPORT_SYMBOL_GPL(kernel_power_off); 674 675 DEFINE_MUTEX(system_transition_mutex); 676 677 /* 678 * Reboot system call: for obvious reasons only root may call it, 679 * and even root needs to set up some magic numbers in the registers 680 * so that some mistake won't make this reboot the whole machine. 681 * You can also set the meaning of the ctrl-alt-del-key here. 682 * 683 * reboot doesn't sync: do that yourself before calling this. 684 */ 685 SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, 686 void __user *, arg) 687 { 688 struct pid_namespace *pid_ns = task_active_pid_ns(current); 689 char buffer[256]; 690 int ret = 0; 691 692 /* We only trust the superuser with rebooting the system. */ 693 if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT)) 694 return -EPERM; 695 696 /* For safety, we require "magic" arguments. */ 697 if (magic1 != LINUX_REBOOT_MAGIC1 || 698 (magic2 != LINUX_REBOOT_MAGIC2 && 699 magic2 != LINUX_REBOOT_MAGIC2A && 700 magic2 != LINUX_REBOOT_MAGIC2B && 701 magic2 != LINUX_REBOOT_MAGIC2C)) 702 return -EINVAL; 703 704 /* 705 * If pid namespaces are enabled and the current task is in a child 706 * pid_namespace, the command is handled by reboot_pid_ns() which will 707 * call do_exit(). 708 */ 709 ret = reboot_pid_ns(pid_ns, cmd); 710 if (ret) 711 return ret; 712 713 /* Instead of trying to make the power_off code look like 714 * halt when pm_power_off is not set do it the easy way. 715 */ 716 if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off()) 717 cmd = LINUX_REBOOT_CMD_HALT; 718 719 mutex_lock(&system_transition_mutex); 720 switch (cmd) { 721 case LINUX_REBOOT_CMD_RESTART: 722 kernel_restart(NULL); 723 break; 724 725 case LINUX_REBOOT_CMD_CAD_ON: 726 C_A_D = 1; 727 break; 728 729 case LINUX_REBOOT_CMD_CAD_OFF: 730 C_A_D = 0; 731 break; 732 733 case LINUX_REBOOT_CMD_HALT: 734 kernel_halt(); 735 do_exit(0); 736 737 case LINUX_REBOOT_CMD_POWER_OFF: 738 kernel_power_off(); 739 do_exit(0); 740 break; 741 742 case LINUX_REBOOT_CMD_RESTART2: 743 ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1); 744 if (ret < 0) { 745 ret = -EFAULT; 746 break; 747 } 748 buffer[sizeof(buffer) - 1] = '\0'; 749 750 kernel_restart(buffer); 751 break; 752 753 #ifdef CONFIG_KEXEC_CORE 754 case LINUX_REBOOT_CMD_KEXEC: 755 ret = kernel_kexec(); 756 break; 757 #endif 758 759 #ifdef CONFIG_HIBERNATION 760 case LINUX_REBOOT_CMD_SW_SUSPEND: 761 ret = hibernate(); 762 break; 763 #endif 764 765 default: 766 ret = -EINVAL; 767 break; 768 } 769 mutex_unlock(&system_transition_mutex); 770 return ret; 771 } 772 773 static void deferred_cad(struct work_struct *dummy) 774 { 775 kernel_restart(NULL); 776 } 777 778 /* 779 * This function gets called by ctrl-alt-del - ie the keyboard interrupt. 780 * As it's called within an interrupt, it may NOT sync: the only choice 781 * is whether to reboot at once, or just ignore the ctrl-alt-del. 782 */ 783 void ctrl_alt_del(void) 784 { 785 static DECLARE_WORK(cad_work, deferred_cad); 786 787 if (C_A_D) 788 schedule_work(&cad_work); 789 else 790 kill_cad_pid(SIGINT, 1); 791 } 792 793 #define POWEROFF_CMD_PATH_LEN 256 794 static char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; 795 static const char reboot_cmd[] = "/sbin/reboot"; 796 797 static int run_cmd(const char *cmd) 798 { 799 char **argv; 800 static char *envp[] = { 801 "HOME=/", 802 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", 803 NULL 804 }; 805 int ret; 806 argv = argv_split(GFP_KERNEL, cmd, NULL); 807 if (argv) { 808 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); 809 argv_free(argv); 810 } else { 811 ret = -ENOMEM; 812 } 813 814 return ret; 815 } 816 817 static int __orderly_reboot(void) 818 { 819 int ret; 820 821 ret = run_cmd(reboot_cmd); 822 823 if (ret) { 824 printk_prefer_direct_enter(); 825 pr_warn("Failed to start orderly reboot: forcing the issue\n"); 826 emergency_sync(); 827 kernel_restart(NULL); 828 printk_prefer_direct_exit(); 829 } 830 831 return ret; 832 } 833 834 static int __orderly_poweroff(bool force) 835 { 836 int ret; 837 838 ret = run_cmd(poweroff_cmd); 839 840 if (ret && force) { 841 printk_prefer_direct_enter(); 842 pr_warn("Failed to start orderly shutdown: forcing the issue\n"); 843 844 /* 845 * I guess this should try to kick off some daemon to sync and 846 * poweroff asap. Or not even bother syncing if we're doing an 847 * emergency shutdown? 848 */ 849 emergency_sync(); 850 kernel_power_off(); 851 printk_prefer_direct_exit(); 852 } 853 854 return ret; 855 } 856 857 static bool poweroff_force; 858 859 static void poweroff_work_func(struct work_struct *work) 860 { 861 __orderly_poweroff(poweroff_force); 862 } 863 864 static DECLARE_WORK(poweroff_work, poweroff_work_func); 865 866 /** 867 * orderly_poweroff - Trigger an orderly system poweroff 868 * @force: force poweroff if command execution fails 869 * 870 * This may be called from any context to trigger a system shutdown. 871 * If the orderly shutdown fails, it will force an immediate shutdown. 872 */ 873 void orderly_poweroff(bool force) 874 { 875 if (force) /* do not override the pending "true" */ 876 poweroff_force = true; 877 schedule_work(&poweroff_work); 878 } 879 EXPORT_SYMBOL_GPL(orderly_poweroff); 880 881 static void reboot_work_func(struct work_struct *work) 882 { 883 __orderly_reboot(); 884 } 885 886 static DECLARE_WORK(reboot_work, reboot_work_func); 887 888 /** 889 * orderly_reboot - Trigger an orderly system reboot 890 * 891 * This may be called from any context to trigger a system reboot. 892 * If the orderly reboot fails, it will force an immediate reboot. 893 */ 894 void orderly_reboot(void) 895 { 896 schedule_work(&reboot_work); 897 } 898 EXPORT_SYMBOL_GPL(orderly_reboot); 899 900 /** 901 * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay 902 * @work: work_struct associated with the emergency poweroff function 903 * 904 * This function is called in very critical situations to force 905 * a kernel poweroff after a configurable timeout value. 906 */ 907 static void hw_failure_emergency_poweroff_func(struct work_struct *work) 908 { 909 printk_prefer_direct_enter(); 910 911 /* 912 * We have reached here after the emergency shutdown waiting period has 913 * expired. This means orderly_poweroff has not been able to shut off 914 * the system for some reason. 915 * 916 * Try to shut down the system immediately using kernel_power_off 917 * if populated 918 */ 919 pr_emerg("Hardware protection timed-out. Trying forced poweroff\n"); 920 kernel_power_off(); 921 922 /* 923 * Worst of the worst case trigger emergency restart 924 */ 925 pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n"); 926 emergency_restart(); 927 928 printk_prefer_direct_exit(); 929 } 930 931 static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work, 932 hw_failure_emergency_poweroff_func); 933 934 /** 935 * hw_failure_emergency_poweroff - Trigger an emergency system poweroff 936 * 937 * This may be called from any critical situation to trigger a system shutdown 938 * after a given period of time. If time is negative this is not scheduled. 939 */ 940 static void hw_failure_emergency_poweroff(int poweroff_delay_ms) 941 { 942 if (poweroff_delay_ms <= 0) 943 return; 944 schedule_delayed_work(&hw_failure_emergency_poweroff_work, 945 msecs_to_jiffies(poweroff_delay_ms)); 946 } 947 948 /** 949 * hw_protection_shutdown - Trigger an emergency system poweroff 950 * 951 * @reason: Reason of emergency shutdown to be printed. 952 * @ms_until_forced: Time to wait for orderly shutdown before tiggering a 953 * forced shudown. Negative value disables the forced 954 * shutdown. 955 * 956 * Initiate an emergency system shutdown in order to protect hardware from 957 * further damage. Usage examples include a thermal protection or a voltage or 958 * current regulator failures. 959 * NOTE: The request is ignored if protection shutdown is already pending even 960 * if the previous request has given a large timeout for forced shutdown. 961 * Can be called from any context. 962 */ 963 void hw_protection_shutdown(const char *reason, int ms_until_forced) 964 { 965 static atomic_t allow_proceed = ATOMIC_INIT(1); 966 967 printk_prefer_direct_enter(); 968 969 pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason); 970 971 /* Shutdown should be initiated only once. */ 972 if (!atomic_dec_and_test(&allow_proceed)) 973 goto out; 974 975 /* 976 * Queue a backup emergency shutdown in the event of 977 * orderly_poweroff failure 978 */ 979 hw_failure_emergency_poweroff(ms_until_forced); 980 orderly_poweroff(true); 981 out: 982 printk_prefer_direct_exit(); 983 } 984 EXPORT_SYMBOL_GPL(hw_protection_shutdown); 985 986 static int __init reboot_setup(char *str) 987 { 988 for (;;) { 989 enum reboot_mode *mode; 990 991 /* 992 * Having anything passed on the command line via 993 * reboot= will cause us to disable DMI checking 994 * below. 995 */ 996 reboot_default = 0; 997 998 if (!strncmp(str, "panic_", 6)) { 999 mode = &panic_reboot_mode; 1000 str += 6; 1001 } else { 1002 mode = &reboot_mode; 1003 } 1004 1005 switch (*str) { 1006 case 'w': 1007 *mode = REBOOT_WARM; 1008 break; 1009 1010 case 'c': 1011 *mode = REBOOT_COLD; 1012 break; 1013 1014 case 'h': 1015 *mode = REBOOT_HARD; 1016 break; 1017 1018 case 's': 1019 /* 1020 * reboot_cpu is s[mp]#### with #### being the processor 1021 * to be used for rebooting. Skip 's' or 'smp' prefix. 1022 */ 1023 str += str[1] == 'm' && str[2] == 'p' ? 3 : 1; 1024 1025 if (isdigit(str[0])) { 1026 int cpu = simple_strtoul(str, NULL, 0); 1027 1028 if (cpu >= num_possible_cpus()) { 1029 pr_err("Ignoring the CPU number in reboot= option. " 1030 "CPU %d exceeds possible cpu number %d\n", 1031 cpu, num_possible_cpus()); 1032 break; 1033 } 1034 reboot_cpu = cpu; 1035 } else 1036 *mode = REBOOT_SOFT; 1037 break; 1038 1039 case 'g': 1040 *mode = REBOOT_GPIO; 1041 break; 1042 1043 case 'b': 1044 case 'a': 1045 case 'k': 1046 case 't': 1047 case 'e': 1048 case 'p': 1049 reboot_type = *str; 1050 break; 1051 1052 case 'f': 1053 reboot_force = 1; 1054 break; 1055 } 1056 1057 str = strchr(str, ','); 1058 if (str) 1059 str++; 1060 else 1061 break; 1062 } 1063 return 1; 1064 } 1065 __setup("reboot=", reboot_setup); 1066 1067 #ifdef CONFIG_SYSFS 1068 1069 #define REBOOT_COLD_STR "cold" 1070 #define REBOOT_WARM_STR "warm" 1071 #define REBOOT_HARD_STR "hard" 1072 #define REBOOT_SOFT_STR "soft" 1073 #define REBOOT_GPIO_STR "gpio" 1074 #define REBOOT_UNDEFINED_STR "undefined" 1075 1076 #define BOOT_TRIPLE_STR "triple" 1077 #define BOOT_KBD_STR "kbd" 1078 #define BOOT_BIOS_STR "bios" 1079 #define BOOT_ACPI_STR "acpi" 1080 #define BOOT_EFI_STR "efi" 1081 #define BOOT_PCI_STR "pci" 1082 1083 static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 1084 { 1085 const char *val; 1086 1087 switch (reboot_mode) { 1088 case REBOOT_COLD: 1089 val = REBOOT_COLD_STR; 1090 break; 1091 case REBOOT_WARM: 1092 val = REBOOT_WARM_STR; 1093 break; 1094 case REBOOT_HARD: 1095 val = REBOOT_HARD_STR; 1096 break; 1097 case REBOOT_SOFT: 1098 val = REBOOT_SOFT_STR; 1099 break; 1100 case REBOOT_GPIO: 1101 val = REBOOT_GPIO_STR; 1102 break; 1103 default: 1104 val = REBOOT_UNDEFINED_STR; 1105 } 1106 1107 return sprintf(buf, "%s\n", val); 1108 } 1109 static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr, 1110 const char *buf, size_t count) 1111 { 1112 if (!capable(CAP_SYS_BOOT)) 1113 return -EPERM; 1114 1115 if (!strncmp(buf, REBOOT_COLD_STR, strlen(REBOOT_COLD_STR))) 1116 reboot_mode = REBOOT_COLD; 1117 else if (!strncmp(buf, REBOOT_WARM_STR, strlen(REBOOT_WARM_STR))) 1118 reboot_mode = REBOOT_WARM; 1119 else if (!strncmp(buf, REBOOT_HARD_STR, strlen(REBOOT_HARD_STR))) 1120 reboot_mode = REBOOT_HARD; 1121 else if (!strncmp(buf, REBOOT_SOFT_STR, strlen(REBOOT_SOFT_STR))) 1122 reboot_mode = REBOOT_SOFT; 1123 else if (!strncmp(buf, REBOOT_GPIO_STR, strlen(REBOOT_GPIO_STR))) 1124 reboot_mode = REBOOT_GPIO; 1125 else 1126 return -EINVAL; 1127 1128 reboot_default = 0; 1129 1130 return count; 1131 } 1132 static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode); 1133 1134 #ifdef CONFIG_X86 1135 static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 1136 { 1137 return sprintf(buf, "%d\n", reboot_force); 1138 } 1139 static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, 1140 const char *buf, size_t count) 1141 { 1142 bool res; 1143 1144 if (!capable(CAP_SYS_BOOT)) 1145 return -EPERM; 1146 1147 if (kstrtobool(buf, &res)) 1148 return -EINVAL; 1149 1150 reboot_default = 0; 1151 reboot_force = res; 1152 1153 return count; 1154 } 1155 static struct kobj_attribute reboot_force_attr = __ATTR_RW(force); 1156 1157 static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 1158 { 1159 const char *val; 1160 1161 switch (reboot_type) { 1162 case BOOT_TRIPLE: 1163 val = BOOT_TRIPLE_STR; 1164 break; 1165 case BOOT_KBD: 1166 val = BOOT_KBD_STR; 1167 break; 1168 case BOOT_BIOS: 1169 val = BOOT_BIOS_STR; 1170 break; 1171 case BOOT_ACPI: 1172 val = BOOT_ACPI_STR; 1173 break; 1174 case BOOT_EFI: 1175 val = BOOT_EFI_STR; 1176 break; 1177 case BOOT_CF9_FORCE: 1178 val = BOOT_PCI_STR; 1179 break; 1180 default: 1181 val = REBOOT_UNDEFINED_STR; 1182 } 1183 1184 return sprintf(buf, "%s\n", val); 1185 } 1186 static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr, 1187 const char *buf, size_t count) 1188 { 1189 if (!capable(CAP_SYS_BOOT)) 1190 return -EPERM; 1191 1192 if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR))) 1193 reboot_type = BOOT_TRIPLE; 1194 else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR))) 1195 reboot_type = BOOT_KBD; 1196 else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR))) 1197 reboot_type = BOOT_BIOS; 1198 else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR))) 1199 reboot_type = BOOT_ACPI; 1200 else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR))) 1201 reboot_type = BOOT_EFI; 1202 else if (!strncmp(buf, BOOT_PCI_STR, strlen(BOOT_PCI_STR))) 1203 reboot_type = BOOT_CF9_FORCE; 1204 else 1205 return -EINVAL; 1206 1207 reboot_default = 0; 1208 1209 return count; 1210 } 1211 static struct kobj_attribute reboot_type_attr = __ATTR_RW(type); 1212 #endif 1213 1214 #ifdef CONFIG_SMP 1215 static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 1216 { 1217 return sprintf(buf, "%d\n", reboot_cpu); 1218 } 1219 static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr, 1220 const char *buf, size_t count) 1221 { 1222 unsigned int cpunum; 1223 int rc; 1224 1225 if (!capable(CAP_SYS_BOOT)) 1226 return -EPERM; 1227 1228 rc = kstrtouint(buf, 0, &cpunum); 1229 1230 if (rc) 1231 return rc; 1232 1233 if (cpunum >= num_possible_cpus()) 1234 return -ERANGE; 1235 1236 reboot_default = 0; 1237 reboot_cpu = cpunum; 1238 1239 return count; 1240 } 1241 static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu); 1242 #endif 1243 1244 static struct attribute *reboot_attrs[] = { 1245 &reboot_mode_attr.attr, 1246 #ifdef CONFIG_X86 1247 &reboot_force_attr.attr, 1248 &reboot_type_attr.attr, 1249 #endif 1250 #ifdef CONFIG_SMP 1251 &reboot_cpu_attr.attr, 1252 #endif 1253 NULL, 1254 }; 1255 1256 #ifdef CONFIG_SYSCTL 1257 static struct ctl_table kern_reboot_table[] = { 1258 { 1259 .procname = "poweroff_cmd", 1260 .data = &poweroff_cmd, 1261 .maxlen = POWEROFF_CMD_PATH_LEN, 1262 .mode = 0644, 1263 .proc_handler = proc_dostring, 1264 }, 1265 { 1266 .procname = "ctrl-alt-del", 1267 .data = &C_A_D, 1268 .maxlen = sizeof(int), 1269 .mode = 0644, 1270 .proc_handler = proc_dointvec, 1271 }, 1272 { } 1273 }; 1274 1275 static void __init kernel_reboot_sysctls_init(void) 1276 { 1277 register_sysctl_init("kernel", kern_reboot_table); 1278 } 1279 #else 1280 #define kernel_reboot_sysctls_init() do { } while (0) 1281 #endif /* CONFIG_SYSCTL */ 1282 1283 static const struct attribute_group reboot_attr_group = { 1284 .attrs = reboot_attrs, 1285 }; 1286 1287 static int __init reboot_ksysfs_init(void) 1288 { 1289 struct kobject *reboot_kobj; 1290 int ret; 1291 1292 reboot_kobj = kobject_create_and_add("reboot", kernel_kobj); 1293 if (!reboot_kobj) 1294 return -ENOMEM; 1295 1296 ret = sysfs_create_group(reboot_kobj, &reboot_attr_group); 1297 if (ret) { 1298 kobject_put(reboot_kobj); 1299 return ret; 1300 } 1301 1302 kernel_reboot_sysctls_init(); 1303 1304 return 0; 1305 } 1306 late_initcall(reboot_ksysfs_init); 1307 1308 #endif 1309