xref: /openbmc/linux/kernel/power/suspend.c (revision 840ef8b7cc584a23c4f9d05352f4dbaf8e56e5ab)
/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>

#include "power.h"

const char *const pm_states[PM_SUSPEND_MAX] = {
	[PM_SUSPEND_FREEZE]	= "freeze",
	[PM_SUSPEND_STANDBY]	= "standby",
	[PM_SUSPEND_MEM]	= "mem",
};
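
/*
 * Editor's note: these strings are what user space writes to
 * /sys/power/state; state_store() in kernel/power/main.c matches the request
 * against this table and calls pm_suspend() with the corresponding state.
 */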

static const struct platform_suspend_ops *suspend_ops;

static bool need_suspend_ops(suspend_state_t state)
{
	return !!(state > PM_SUSPEND_FREEZE);
}

static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static bool suspend_freeze_wake;

static void freeze_begin(void)
{
	suspend_freeze_wake = false;
}

static void freeze_enter(void)
{
	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
}

void freeze_wake(void)
{
	suspend_freeze_wake = true;
	wake_up(&suspend_freeze_wait_head);
}
EXPORT_SYMBOL_GPL(freeze_wake);
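
/*
 * Editor's note (illustrative, not part of the original file): freeze_enter()
 * parks the suspending task on suspend_freeze_wait_head, so a "freeze" sleep
 * lasts until some wakeup path calls freeze_wake().  A hypothetical
 * driver-level wakeup interrupt handler could end it directly:
 *
 *	static irqreturn_t foo_wake_irq(int irq, void *dev_id)
 *	{
 *		freeze_wake();		// let freeze_enter() return
 *		return IRQ_HANDLED;
 *	}
 */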

/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	lock_system_sleep();
	suspend_ops = ops;
	unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
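
/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * platform registers its low-level callbacks once, typically from its init
 * code.  ->enter() is required, and a ->valid() callback is needed for
 * "standby"/"mem" to be recognized at all; suspend_valid_only_mem() below can
 * serve as ->valid() when only "mem" is supported.  The "foo" names here are
 * made up:
 *
 *	static int foo_pm_enter(suspend_state_t state)
 *	{
 *		// program the power controller, resume here on wakeup
 *		return 0;
 *	}
 *
 *	static const struct platform_suspend_ops foo_pm_ops = {
 *		.valid	= suspend_valid_only_mem,
 *		.enter	= foo_pm_enter,
 *	};
 *
 *	static int __init foo_pm_init(void)
 *	{
 *		suspend_set_ops(&foo_pm_ops);
 *		return 0;
 *	}
 *	arch_initcall(foo_pm_init);
 */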

bool valid_state(suspend_state_t state)
{
	if (state == PM_SUSPEND_FREEZE)
		return true;
	/*
	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM require low-level platform
	 * support and must be accepted by the low-level implementation;
	 * no valid() callback implies that none of these states is valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement "mem" suspend only, and therefore only need
 * to check for that state in their ->valid() callback, can use this instead
 * of rolling their own.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);

static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;
	}
#endif /* CONFIG_PM_DEBUG */
	return 0;
}
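
/*
 * Editor's note: when PM debugging is enabled, pm_test_level is selected by
 * writing one of the test mode names to /sys/power/pm_test.  suspend_test()
 * then pauses for five seconds at the matching point (TEST_FREEZER,
 * TEST_DEVICES, TEST_PLATFORM, TEST_CPUS or TEST_CORE) and makes the callers
 * unwind the transition, so each stage of the suspend sequence can be
 * exercised on its own.
 */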

/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation).  Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;

	if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
		return -EPERM;

	pm_prepare_console();

	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
	if (error)
		goto Finish;

	error = suspend_freeze_processes();
	if (!error)
		return 0;

	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
	return error;
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}
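
/*
 * Editor's note (illustrative, not part of the original file): the two weak
 * definitions above can be overridden by strong, architecture-provided
 * versions when more than plain local_irq_disable()/local_irq_enable() is
 * needed around the low-level suspend entry, e.g. (made-up helper):
 *
 *	void arch_suspend_disable_irqs(void)
 *	{
 *		foo_prepare_wake_irqs();	// hypothetical arch hook
 *		local_irq_disable();
 *	}
 */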

/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Set to true if a wakeup event is pending and the sleep state
 *	should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		freeze_enter();
		goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (need_suspend_ops(state) && !suspend_ops)
		return -ENOSYS;

	trace_machine_suspend(state);
	if (need_suspend_ops(state) && suspend_ops->begin) {
		error = suspend_ops->begin(state);
		if (error)
			goto Close;
	}
	suspend_console();
	ftrace_stop();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to suspend\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && need_suspend_ops(state)
		&& suspend_ops->suspend_again && suspend_ops->suspend_again());

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	ftrace_start();
	resume_console();
 Close:
	if (need_suspend_ops(state) && suspend_ops->end)
		suspend_ops->end();
	trace_machine_suspend(PWR_EVENT_EXIT);
	return error;

 Recover_platform:
	if (need_suspend_ops(state) && suspend_ops->recover)
		suspend_ops->recover();
	goto Resume_devices;
}

/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	if (!valid_state(state))
		return -ENODEV;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}

/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
	int error;

	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
		return -EINVAL;

	error = enter_state(state);
	if (error) {
		suspend_stats.fail++;
		dpm_save_failed_errno(error);
	} else {
		suspend_stats.success++;
	}
	return error;
}
EXPORT_SYMBOL(pm_suspend);
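
/*
 * Editor's note (illustrative, not part of the original file): pm_suspend() is
 * exported, so kernel code may start a sleep transition directly, e.g.:
 *
 *	int error = pm_suspend(PM_SUSPEND_MEM);	// suspend-to-RAM
 *
 * User space normally reaches the same path by writing "freeze", "standby"
 * or "mem" to /sys/power/state.
 */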