/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/suspend.h>

#ifdef CONFIG_PM
/**
 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_suspend(), execute it and return its error code.  Otherwise,
 * return 0.
 */
int pm_generic_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

/**
 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
 * @dev: Device to resume.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_resume(), execute it and return its error code.  Otherwise,
 * return 0.
 */
int pm_generic_runtime_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
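
/*
 * Example (illustrative sketch, not part of the original file): a bus type or
 * PM domain that simply wants to run the driver's own runtime PM callbacks can
 * point its dev_pm_ops at the generic helpers above.  The structure name below
 * (foo_bus_pm_ops) is hypothetical.
 *
 *	static const struct dev_pm_ops foo_bus_pm_ops = {
 *		SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
 *				   pm_generic_runtime_resume, NULL)
 *	};
 */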
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
/**
 * pm_generic_prepare - Generic routine preparing a device for power transition.
 * @dev: Device to prepare.
 *
 * Prepare a device for a system-wide power transition.
 */
int pm_generic_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}

/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

/**
 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);

/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

/**
 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_late);

/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);

/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

/**
 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);

/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

/**
 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_early);

/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);

/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

/**
 * pm_generic_resume_early - Generic resume_early callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);

/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
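
/*
 * Example (illustrative sketch, not part of the original file): a subsystem
 * whose drivers implement their own ->suspend()/->resume() can use the generic
 * system sleep helpers directly; with CONFIG_PM_SLEEP set,
 * SET_SYSTEM_SLEEP_PM_OPS() also points the freeze/thaw/poweroff/restore
 * hibernation callbacks at the same pair.  The structure name below
 * (foo_subsys_pm_ops) is hypothetical.
 *
 *	static const struct dev_pm_ops foo_subsys_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
 *	};
 */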

/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

/**
 * pm_generic_restore_early - Generic restore_early callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);

/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
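
/*
 * Example (illustrative sketch, not part of the original file): the _late and
 * _noirq variants above can be wired up with the corresponding helper macros
 * from <linux/pm.h>.  The structure name below (baz_pm_ops) is hypothetical.
 *
 *	static const struct dev_pm_ops baz_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend_late,
 *					     pm_generic_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend_noirq,
 *					      pm_generic_resume_noirq)
 *	};
 */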

/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);
}
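
/*
 * Example (illustrative sketch, not part of the original file): a bus type or
 * other subsystem can use pm_generic_prepare() and pm_generic_complete() to
 * simply forward ->prepare()/->complete() to the driver.  Note that in this
 * revision neither helper is exported, so only built-in code can do this.
 * The names below (bar_bus_pm_ops, bar_bus) are hypothetical.
 *
 *	static const struct dev_pm_ops bar_bus_pm_ops = {
 *		.prepare = pm_generic_prepare,
 *		.complete = pm_generic_complete,
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
 *	};
 *
 *	struct bus_type bar_bus = {
 *		.name = "bar",
 *		.pm = &bar_bus_pm_ops,
 *	};
 */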

/**
 * pm_complete_with_resume_check - Complete a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition and
 * optionally schedule a runtime resume of the device if the system resume in
 * progress has been initiated by the platform firmware and the device had its
 * power.direct_complete flag set.
 */
void pm_complete_with_resume_check(struct device *dev)
{
	pm_generic_complete(dev);
	/*
	 * If the device had been runtime-suspended before the system went into
	 * the sleep state it is going out of and it has never been resumed till
	 * now, resume it in case the firmware powered it up.
	 */
	if (dev->power.direct_complete && pm_resume_via_firmware())
		pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
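
/*
 * Example (illustrative sketch, not part of the original file): a driver whose
 * device may be powered up by the platform firmware during system resume (for
 * instance on ACPI systems) can install pm_complete_with_resume_check() as its
 * ->complete() callback so that a runtime resume gets scheduled when needed.
 * The names below (qux_suspend, qux_resume, qux_pm_ops) are hypothetical.
 *
 *	static const struct dev_pm_ops qux_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(qux_suspend, qux_resume)
 *		.complete = pm_complete_with_resume_check,
 *	};
 */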
#endif /* CONFIG_PM_SLEEP */