xref: /openbmc/linux/drivers/base/power/generic_ops.c (revision 63dc02bd)
/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>

#ifdef CONFIG_PM_RUNTIME
/**
 * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
 * @dev: Device to handle.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_idle(), execute it and return its error code, if nonzero.
 * Otherwise, execute pm_runtime_suspend() for the device and return 0.
 */
int pm_generic_runtime_idle(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm && pm->runtime_idle) {
		int ret = pm->runtime_idle(dev);
		if (ret)
			return ret;
	}

	pm_runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
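
/*
 * Illustrative sketch, not part of this file: a driver that needs to veto
 * automatic runtime suspend can provide its own ->runtime_idle() and return
 * a nonzero error code, which pm_generic_runtime_idle() propagates instead
 * of calling pm_runtime_suspend().  The names foo_runtime_idle(),
 * foo_device_busy() and foo_pm_ops below are hypothetical.
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		return foo_device_busy(dev) ? -EBUSY : 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.runtime_idle = foo_runtime_idle,
 *	};
 *
 * A subsystem that uses pm_generic_runtime_idle() as its ->runtime_idle()
 * callback then picks foo_runtime_idle() up through dev->driver->pm.
 */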

/**
 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_suspend(), execute it and return its error code.  Otherwise,
 * return 0.
 */
int pm_generic_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

/**
 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
 * @dev: Device to resume.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_resume(), execute it and return its error code.  Otherwise,
 * return 0.
 */
int pm_generic_runtime_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM_RUNTIME */
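
/*
 * Illustrative sketch, not part of this file: a bus type or other subsystem
 * can plug the generic runtime PM callbacks above straight into its
 * dev_pm_ops, so that runtime PM requests are simply forwarded to whatever
 * the bound driver provides.  The names foo_bus_pm_ops and foo_bus_type
 * below are hypothetical.
 *
 *	static const struct dev_pm_ops foo_bus_pm_ops = {
 *		.runtime_suspend = pm_generic_runtime_suspend,
 *		.runtime_resume = pm_generic_runtime_resume,
 *		.runtime_idle = pm_generic_runtime_idle,
 *	};
 *
 *	struct bus_type foo_bus_type = {
 *		.name = "foo",
 *		.pm = &foo_bus_pm_ops,
 *	};
 */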

#ifdef CONFIG_PM_SLEEP
/**
 * pm_generic_prepare - Generic routine preparing a device for power transition.
 * @dev: Device to prepare.
 *
 * Prepare a device for a system-wide power transition.
 */
int pm_generic_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}

/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

/**
 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);

/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

/**
 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_late);

/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);

/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

/**
 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);

/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

/**
 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_early);

/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);

/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

/**
 * pm_generic_resume_early - Generic resume_early callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);

/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);

/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

/**
 * pm_generic_restore_early - Generic restore_early callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);

/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);

/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);

	/*
	 * Let runtime PM try to suspend devices that haven't been in use before
	 * going into the system-wide sleep state we're resuming from.
	 */
	pm_runtime_idle(dev);
}
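
/*
 * Illustrative sketch, not part of this file: the generic system sleep
 * callbacks above can be wired into a subsystem's dev_pm_ops in the same
 * way as the runtime PM ones, so suspend/resume and hibernation handling
 * falls through to whatever the bound driver provides.  The name
 * foo_bus_pm_ops below is hypothetical.
 *
 *	static const struct dev_pm_ops foo_bus_pm_ops = {
 *		.prepare = pm_generic_prepare,
 *		.suspend = pm_generic_suspend,
 *		.resume = pm_generic_resume,
 *		.freeze = pm_generic_freeze,
 *		.thaw = pm_generic_thaw,
 *		.poweroff = pm_generic_poweroff,
 *		.restore = pm_generic_restore,
 *		.complete = pm_generic_complete,
 *	};
 */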
#endif /* CONFIG_PM_SLEEP */