/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>

#include "power.h"

struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
	[PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
	[PM_SUSPEND_STANDBY] = { .label = "standby", },
	[PM_SUSPEND_MEM] = { .label = "mem", },
};

static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;

static bool need_suspend_ops(suspend_state_t state)
{
	return state > PM_SUSPEND_FREEZE;
}

static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static bool suspend_freeze_wake;

void freeze_set_ops(const struct platform_freeze_ops *ops)
{
	lock_system_sleep();
	freeze_ops = ops;
	unlock_system_sleep();
}

static void freeze_begin(void)
{
	suspend_freeze_wake = false;
}

static void freeze_enter(void)
{
	cpuidle_use_deepest_state(true);
	cpuidle_resume();
	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
	cpuidle_pause();
	cpuidle_use_deepest_state(false);
}

void freeze_wake(void)
{
	suspend_freeze_wake = true;
	wake_up(&suspend_freeze_wait_head);
}
EXPORT_SYMBOL_GPL(freeze_wake);

static bool valid_state(suspend_state_t state)
{
	/*
	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
	 * support and need to be valid to the low level implementation;
	 * no ->valid() callback implies that none are valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/*
 * If this is set, the "mem" label always corresponds to the deepest sleep state
 * available, the "standby" label corresponds to the second deepest sleep state
 * available (if any), and the "freeze" label corresponds to the remaining
 * available sleep state (if there is one).
 */
static bool relative_states;

static int __init sleep_states_setup(char *str)
{
	relative_states = !strncmp(str, "1", 1);
	if (relative_states) {
		pm_states[PM_SUSPEND_MEM].state = PM_SUSPEND_FREEZE;
		pm_states[PM_SUSPEND_FREEZE].state = 0;
	}
	return 1;
}

__setup("relative_sleep_states=", sleep_states_setup);

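/*
 * Illustrative sketch (not part of this file): a wakeup-capable driver can
 * end suspend-to-idle by calling freeze_wake() from its interrupt handler,
 * which sets suspend_freeze_wake and lets freeze_enter() return from its
 * wait_event() above.  The handler below is hypothetical and assumes the
 * driver passed its struct device as the dev_id cookie to request_irq().
 *
 *	static irqreturn_t example_wakeup_irq(int irq, void *dev_id)
 *	{
 *		struct device *dev = dev_id;
 *
 *		pm_wakeup_event(dev, 0);
 *		freeze_wake();
 *		return IRQ_HANDLED;
 *	}
 */
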
/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	suspend_state_t i;
	int j = PM_SUSPEND_MAX - 1;

	lock_system_sleep();

	suspend_ops = ops;
	for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
		if (valid_state(i))
			pm_states[j--].state = i;
		else if (!relative_states)
			pm_states[j--].state = 0;

	pm_states[j--].state = PM_SUSPEND_FREEZE;
	while (j >= PM_SUSPEND_MIN)
		pm_states[j--].state = 0;

	unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);

static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}

/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation). Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;

	if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
		return -EPERM;

	pm_prepare_console();

	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
	if (error)
		goto Finish;

	trace_suspend_resume(TPS("freeze_processes"), 0, true);
	error = suspend_freeze_processes();
	trace_suspend_resume(TPS("freeze_processes"), 0, false);
	if (!error)
		return 0;

	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
	return error;
}

/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}

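/*
 * Illustrative sketch (not part of this file): a minimal mem-only platform
 * might register its callbacks roughly as follows, reusing
 * suspend_valid_only_mem() for .valid.  example_pm_enter() and
 * example_pm_init() are hypothetical; only .valid and .enter are required
 * here, the remaining platform_suspend_ops hooks are optional.
 *
 *	static int example_pm_enter(suspend_state_t state)
 *	{
 *		// program the hardware to enter its low-power state here
 *		return 0;
 *	}
 *
 *	static const struct platform_suspend_ops example_pm_ops = {
 *		.valid	= suspend_valid_only_mem,
 *		.enter	= example_pm_enter,
 *	};
 *
 *	static int __init example_pm_init(void)
 *	{
 *		suspend_set_ops(&example_pm_ops);
 *		return 0;
 *	}
 *	late_initcall(example_pm_init);
 */
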
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		trace_suspend_resume(TPS("machine_suspend"), state, true);
		freeze_enter();
		trace_suspend_resume(TPS("machine_suspend"), state, false);
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
			events_check_enabled = false;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (need_suspend_ops(state) && !suspend_ops)
		return -ENOSYS;

	if (need_suspend_ops(state) && suspend_ops->begin) {
		error = suspend_ops->begin(state);
		if (error)
			goto Close;
	} else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
		error = freeze_ops->begin();
		if (error)
			goto Close;
	}
	suspend_console();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && need_suspend_ops(state)
		&& suspend_ops->suspend_again && suspend_ops->suspend_again());

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	resume_console();
 Close:
	if (need_suspend_ops(state) && suspend_ops->end)
		suspend_ops->end();
	else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
		freeze_ops->end();

	return error;

 Recover_platform:
	if (need_suspend_ops(state) && suspend_ops->recover)
		suspend_ops->recover();
	goto Resume_devices;
}

/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}

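/*
 * Illustrative sketch (not part of this file): the suspend_again loop in
 * suspend_devices_and_enter() lets a platform transparently re-enter suspend
 * after a brief wakeup, e.g. to poll a charger.  A hypothetical callback
 * could look like this; example_charging_in_progress() is assumed to exist.
 *
 *	static bool example_suspend_again(void)
 *	{
 *		// true: go back to sleep without resuming userspace
 *		return example_charging_in_progress();
 *	}
 *
 * Wiring it up is just ".suspend_again = example_suspend_again," in the
 * platform_suspend_ops table passed to suspend_set_ops().
 */
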
/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case. Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	trace_suspend_resume(TPS("suspend_enter"), state, true);
	if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			pr_warning("PM: Unsupported test mode for freeze state, "
				   "please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");
	trace_suspend_resume(TPS("sync_filesystems"), 0, false);

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	trace_suspend_resume(TPS("suspend_enter"), state, false);
	pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}

/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
	int error;

	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
		return -EINVAL;

	error = enter_state(state);
	if (error) {
		suspend_stats.fail++;
		dpm_save_failed_errno(error);
	} else {
		suspend_stats.success++;
	}
	return error;
}
EXPORT_SYMBOL(pm_suspend);

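/*
 * Illustrative sketch (not part of this file): pm_suspend() is the entry
 * point used by the rest of the kernel, most notably by the /sys/power/state
 * handler in kernel/power/main.c.  A caller passes one of the PM_SUSPEND_*
 * states and checks the result, e.g.:
 *
 *	int error = pm_suspend(PM_SUSPEND_MEM);
 *	if (error)
 *		pr_err("example: suspend to RAM failed: %d\n", error);
 *
 * -EINVAL means the state is out of range or not valid on this platform,
 * -EBUSY means another sleep transition already holds pm_mutex, and other
 * errors are propagated from device or platform callbacks.
 */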