1 /* 2 * kernel/power/suspend.c - Suspend to RAM and standby functionality. 3 * 4 * Copyright (c) 2003 Patrick Mochel 5 * Copyright (c) 2003 Open Source Development Lab 6 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 7 * 8 * This file is released under the GPLv2. 9 */ 10 11 #include <linux/string.h> 12 #include <linux/delay.h> 13 #include <linux/errno.h> 14 #include <linux/init.h> 15 #include <linux/console.h> 16 #include <linux/cpu.h> 17 #include <linux/cpuidle.h> 18 #include <linux/syscalls.h> 19 #include <linux/gfp.h> 20 #include <linux/io.h> 21 #include <linux/kernel.h> 22 #include <linux/list.h> 23 #include <linux/mm.h> 24 #include <linux/slab.h> 25 #include <linux/export.h> 26 #include <linux/suspend.h> 27 #include <linux/syscore_ops.h> 28 #include <linux/ftrace.h> 29 #include <trace/events/power.h> 30 #include <linux/compiler.h> 31 32 #include "power.h" 33 34 struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = { 35 [PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE }, 36 [PM_SUSPEND_STANDBY] = { .label = "standby", }, 37 [PM_SUSPEND_MEM] = { .label = "mem", }, 38 }; 39 40 static const struct platform_suspend_ops *suspend_ops; 41 static const struct platform_freeze_ops *freeze_ops; 42 43 static bool need_suspend_ops(suspend_state_t state) 44 { 45 return state > PM_SUSPEND_FREEZE; 46 } 47 48 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); 49 static bool suspend_freeze_wake; 50 51 void freeze_set_ops(const struct platform_freeze_ops *ops) 52 { 53 lock_system_sleep(); 54 freeze_ops = ops; 55 unlock_system_sleep(); 56 } 57 58 static void freeze_begin(void) 59 { 60 suspend_freeze_wake = false; 61 } 62 63 static void freeze_enter(void) 64 { 65 cpuidle_use_deepest_state(true); 66 cpuidle_resume(); 67 wait_event(suspend_freeze_wait_head, suspend_freeze_wake); 68 cpuidle_pause(); 69 cpuidle_use_deepest_state(false); 70 } 71 72 void freeze_wake(void) 73 { 74 suspend_freeze_wake = true; 75 wake_up(&suspend_freeze_wait_head); 76 } 77 EXPORT_SYMBOL_GPL(freeze_wake); 78 79 static bool valid_state(suspend_state_t state) 80 { 81 /* 82 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level 83 * support and need to be valid to the low level 84 * implementation, no valid callback implies that none are valid. 85 */ 86 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); 87 } 88 89 /* 90 * If this is set, the "mem" label always corresponds to the deepest sleep state 91 * available, the "standby" label corresponds to the second deepest sleep state 92 * available (if any), and the "freeze" label corresponds to the remaining 93 * available sleep state (if there is one). 94 */ 95 static bool relative_states; 96 97 static int __init sleep_states_setup(char *str) 98 { 99 relative_states = !strncmp(str, "1", 1); 100 if (relative_states) { 101 pm_states[PM_SUSPEND_MEM].state = PM_SUSPEND_FREEZE; 102 pm_states[PM_SUSPEND_FREEZE].state = 0; 103 } 104 return 1; 105 } 106 107 __setup("relative_sleep_states=", sleep_states_setup); 108 109 /** 110 * suspend_set_ops - Set the global suspend method table. 111 * @ops: Suspend operations to use. 112 */ 113 void suspend_set_ops(const struct platform_suspend_ops *ops) 114 { 115 suspend_state_t i; 116 int j = PM_SUSPEND_MAX - 1; 117 118 lock_system_sleep(); 119 120 suspend_ops = ops; 121 for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--) 122 if (valid_state(i)) 123 pm_states[j--].state = i; 124 else if (!relative_states) 125 pm_states[j--].state = 0; 126 127 pm_states[j--].state = PM_SUSPEND_FREEZE; 128 while (j >= PM_SUSPEND_MIN) 129 pm_states[j--].state = 0; 130 131 unlock_system_sleep(); 132 } 133 EXPORT_SYMBOL_GPL(suspend_set_ops); 134 135 /** 136 * suspend_valid_only_mem - Generic memory-only valid callback. 137 * 138 * Platform drivers that implement mem suspend only and only need to check for 139 * that in their .valid() callback can use this instead of rolling their own 140 * .valid() callback. 141 */ 142 int suspend_valid_only_mem(suspend_state_t state) 143 { 144 return state == PM_SUSPEND_MEM; 145 } 146 EXPORT_SYMBOL_GPL(suspend_valid_only_mem); 147 148 static int suspend_test(int level) 149 { 150 #ifdef CONFIG_PM_DEBUG 151 if (pm_test_level == level) { 152 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); 153 mdelay(5000); 154 return 1; 155 } 156 #endif /* !CONFIG_PM_DEBUG */ 157 return 0; 158 } 159 160 /** 161 * suspend_prepare - Prepare for entering system sleep state. 162 * 163 * Common code run for every system sleep state that can be entered (except for 164 * hibernation). Run suspend notifiers, allocate the "suspend" console and 165 * freeze processes. 166 */ 167 static int suspend_prepare(suspend_state_t state) 168 { 169 int error; 170 171 if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter)) 172 return -EPERM; 173 174 pm_prepare_console(); 175 176 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE); 177 if (error) 178 goto Finish; 179 180 trace_suspend_resume(TPS("freeze_processes"), 0, true); 181 error = suspend_freeze_processes(); 182 trace_suspend_resume(TPS("freeze_processes"), 0, false); 183 if (!error) 184 return 0; 185 186 suspend_stats.failed_freeze++; 187 dpm_save_failed_step(SUSPEND_FREEZE); 188 Finish: 189 pm_notifier_call_chain(PM_POST_SUSPEND); 190 pm_restore_console(); 191 return error; 192 } 193 194 /* default implementation */ 195 void __weak arch_suspend_disable_irqs(void) 196 { 197 local_irq_disable(); 198 } 199 200 /* default implementation */ 201 void __weak arch_suspend_enable_irqs(void) 202 { 203 local_irq_enable(); 204 } 205 206 /** 207 * suspend_enter - Make the system enter the given sleep state. 208 * @state: System sleep state to enter. 209 * @wakeup: Returns information that the sleep state should not be re-entered. 210 * 211 * This function should be called after devices have been suspended. 212 */ 213 static int suspend_enter(suspend_state_t state, bool *wakeup) 214 { 215 int error; 216 217 if (need_suspend_ops(state) && suspend_ops->prepare) { 218 error = suspend_ops->prepare(); 219 if (error) 220 goto Platform_finish; 221 } 222 223 error = dpm_suspend_end(PMSG_SUSPEND); 224 if (error) { 225 printk(KERN_ERR "PM: Some devices failed to power down\n"); 226 goto Platform_finish; 227 } 228 229 if (need_suspend_ops(state) && suspend_ops->prepare_late) { 230 error = suspend_ops->prepare_late(); 231 if (error) 232 goto Platform_wake; 233 } 234 235 if (suspend_test(TEST_PLATFORM)) 236 goto Platform_wake; 237 238 /* 239 * PM_SUSPEND_FREEZE equals 240 * frozen processes + suspended devices + idle processors. 241 * Thus we should invoke freeze_enter() soon after 242 * all the devices are suspended. 243 */ 244 if (state == PM_SUSPEND_FREEZE) { 245 trace_suspend_resume(TPS("machine_suspend"), state, true); 246 freeze_enter(); 247 trace_suspend_resume(TPS("machine_suspend"), state, false); 248 goto Platform_wake; 249 } 250 251 ftrace_stop(); 252 error = disable_nonboot_cpus(); 253 if (error || suspend_test(TEST_CPUS)) 254 goto Enable_cpus; 255 256 arch_suspend_disable_irqs(); 257 BUG_ON(!irqs_disabled()); 258 259 error = syscore_suspend(); 260 if (!error) { 261 *wakeup = pm_wakeup_pending(); 262 if (!(suspend_test(TEST_CORE) || *wakeup)) { 263 trace_suspend_resume(TPS("machine_suspend"), 264 state, true); 265 error = suspend_ops->enter(state); 266 trace_suspend_resume(TPS("machine_suspend"), 267 state, false); 268 events_check_enabled = false; 269 } 270 syscore_resume(); 271 } 272 273 arch_suspend_enable_irqs(); 274 BUG_ON(irqs_disabled()); 275 276 Enable_cpus: 277 enable_nonboot_cpus(); 278 ftrace_start(); 279 280 Platform_wake: 281 if (need_suspend_ops(state) && suspend_ops->wake) 282 suspend_ops->wake(); 283 284 dpm_resume_start(PMSG_RESUME); 285 286 Platform_finish: 287 if (need_suspend_ops(state) && suspend_ops->finish) 288 suspend_ops->finish(); 289 290 return error; 291 } 292 293 /** 294 * suspend_devices_and_enter - Suspend devices and enter system sleep state. 295 * @state: System sleep state to enter. 296 */ 297 int suspend_devices_and_enter(suspend_state_t state) 298 { 299 int error; 300 bool wakeup = false; 301 302 if (need_suspend_ops(state) && !suspend_ops) 303 return -ENOSYS; 304 305 if (need_suspend_ops(state) && suspend_ops->begin) { 306 error = suspend_ops->begin(state); 307 if (error) 308 goto Close; 309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { 310 error = freeze_ops->begin(); 311 if (error) 312 goto Close; 313 } 314 suspend_console(); 315 suspend_test_start(); 316 error = dpm_suspend_start(PMSG_SUSPEND); 317 if (error) { 318 pr_err("PM: Some devices failed to suspend, or early wake event detected\n"); 319 goto Recover_platform; 320 } 321 suspend_test_finish("suspend devices"); 322 if (suspend_test(TEST_DEVICES)) 323 goto Recover_platform; 324 325 do { 326 error = suspend_enter(state, &wakeup); 327 } while (!error && !wakeup && need_suspend_ops(state) 328 && suspend_ops->suspend_again && suspend_ops->suspend_again()); 329 330 Resume_devices: 331 suspend_test_start(); 332 dpm_resume_end(PMSG_RESUME); 333 suspend_test_finish("resume devices"); 334 resume_console(); 335 Close: 336 if (need_suspend_ops(state) && suspend_ops->end) 337 suspend_ops->end(); 338 else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) 339 freeze_ops->end(); 340 341 return error; 342 343 Recover_platform: 344 if (need_suspend_ops(state) && suspend_ops->recover) 345 suspend_ops->recover(); 346 goto Resume_devices; 347 } 348 349 /** 350 * suspend_finish - Clean up before finishing the suspend sequence. 351 * 352 * Call platform code to clean up, restart processes, and free the console that 353 * we've allocated. This routine is not called for hibernation. 354 */ 355 static void suspend_finish(void) 356 { 357 suspend_thaw_processes(); 358 pm_notifier_call_chain(PM_POST_SUSPEND); 359 pm_restore_console(); 360 } 361 362 /** 363 * enter_state - Do common work needed to enter system sleep state. 364 * @state: System sleep state to enter. 365 * 366 * Make sure that no one else is trying to put the system into a sleep state. 367 * Fail if that's not the case. Otherwise, prepare for system suspend, make the 368 * system enter the given sleep state and clean up after wakeup. 369 */ 370 static int enter_state(suspend_state_t state) 371 { 372 int error; 373 374 trace_suspend_resume(TPS("suspend_enter"), state, true); 375 if (state == PM_SUSPEND_FREEZE) { 376 #ifdef CONFIG_PM_DEBUG 377 if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) { 378 pr_warning("PM: Unsupported test mode for freeze state," 379 "please choose none/freezer/devices/platform.\n"); 380 return -EAGAIN; 381 } 382 #endif 383 } else if (!valid_state(state)) { 384 return -EINVAL; 385 } 386 if (!mutex_trylock(&pm_mutex)) 387 return -EBUSY; 388 389 if (state == PM_SUSPEND_FREEZE) 390 freeze_begin(); 391 392 trace_suspend_resume(TPS("sync_filesystems"), 0, true); 393 printk(KERN_INFO "PM: Syncing filesystems ... "); 394 sys_sync(); 395 printk("done.\n"); 396 trace_suspend_resume(TPS("sync_filesystems"), 0, false); 397 398 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label); 399 error = suspend_prepare(state); 400 if (error) 401 goto Unlock; 402 403 if (suspend_test(TEST_FREEZER)) 404 goto Finish; 405 406 trace_suspend_resume(TPS("suspend_enter"), state, false); 407 pr_debug("PM: Entering %s sleep\n", pm_states[state].label); 408 pm_restrict_gfp_mask(); 409 error = suspend_devices_and_enter(state); 410 pm_restore_gfp_mask(); 411 412 Finish: 413 pr_debug("PM: Finishing wakeup.\n"); 414 suspend_finish(); 415 Unlock: 416 mutex_unlock(&pm_mutex); 417 return error; 418 } 419 420 /** 421 * pm_suspend - Externally visible function for suspending the system. 422 * @state: System sleep state to enter. 423 * 424 * Check if the value of @state represents one of the supported states, 425 * execute enter_state() and update system suspend statistics. 426 */ 427 int pm_suspend(suspend_state_t state) 428 { 429 int error; 430 431 if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) 432 return -EINVAL; 433 434 error = enter_state(state); 435 if (error) { 436 suspend_stats.fail++; 437 dpm_save_failed_errno(error); 438 } else { 439 suspend_stats.success++; 440 } 441 return error; 442 } 443 EXPORT_SYMBOL(pm_suspend); 444