/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>

#ifdef CONFIG_PM_RUNTIME
/**
 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_suspend(), execute it and return its error code.  Otherwise,
 * return 0.
 */
int pm_generic_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

/**
 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
 * @dev: Device to resume.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_resume(), execute it and return its error code.  Otherwise,
 * return 0.
 */
int pm_generic_runtime_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM_RUNTIME */
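
/*
 * Illustrative sketch, not part of the original file: a subsystem (a bus
 * type, device class or device type) can route runtime PM through the
 * generic helpers above, so that a driver's own ->runtime_suspend() and
 * ->runtime_resume() methods are used whenever the driver provides them.
 * The name "example_subsys_pm_ops" is hypothetical; a real subsystem
 * would point its ->pm field at a structure like this one.  Guarded by
 * "#if 0" so that it is never compiled.
 */
#if 0
static const struct dev_pm_ops example_subsys_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
};
#endif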

#ifdef CONFIG_PM_SLEEP
/**
 * pm_generic_prepare - Generic routine preparing a device for power transition.
 * @dev: Device to prepare.
 *
 * Prepare a device for a system-wide power transition.
 */
int pm_generic_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}

/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

/**
 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
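
/*
 * Illustrative sketch, not part of the original file: the three suspend
 * helpers above cover the phases of a system suspend transition, which
 * the PM core runs in the order ->suspend(), ->suspend_late() and
 * ->suspend_noirq() (the last one with device interrupts disabled); the
 * resume side, defined later in this file, runs in the reverse order.
 * The name "example_sleep_pm_ops" is hypothetical.  Guarded by "#if 0"
 * so that it is never compiled.
 */
#if 0
static const struct dev_pm_ops example_sleep_pm_ops = {
	.suspend = pm_generic_suspend,
	.suspend_late = pm_generic_suspend_late,
	.suspend_noirq = pm_generic_suspend_noirq,
	.resume_noirq = pm_generic_resume_noirq,
	.resume_early = pm_generic_resume_early,
	.resume = pm_generic_resume,
};
#endif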

/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

/**
 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_late);

/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);

/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

/**
 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);

/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

/**
 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_early);

/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);

/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

/**
 * pm_generic_resume_early - Generic resume_early callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);

/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);

/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

/**
 * pm_generic_restore_early - Generic restore_early callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);

/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
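
/*
 * Illustrative sketch, not part of the original file: the hibernation
 * helpers above map onto dev_pm_ops in the same way.  The ->freeze*()
 * callbacks quiesce devices before the hibernation image is created,
 * ->thaw*() undo that afterwards, ->poweroff*() run once the image has
 * been saved and the machine is about to be powered down, and
 * ->restore*() bring devices back up after the image has been loaded on
 * resume from hibernation.  The name "example_hibernation_pm_ops" is
 * hypothetical.  Guarded by "#if 0" so that it is never compiled.
 */
#if 0
static const struct dev_pm_ops example_hibernation_pm_ops = {
	.freeze = pm_generic_freeze,
	.freeze_late = pm_generic_freeze_late,
	.freeze_noirq = pm_generic_freeze_noirq,
	.thaw_noirq = pm_generic_thaw_noirq,
	.thaw_early = pm_generic_thaw_early,
	.thaw = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.poweroff_late = pm_generic_poweroff_late,
	.poweroff_noirq = pm_generic_poweroff_noirq,
	.restore_noirq = pm_generic_restore_noirq,
	.restore_early = pm_generic_restore_early,
	.restore = pm_generic_restore,
};
#endif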

/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);

	/*
	 * Let runtime PM try to suspend devices that haven't been in use before
	 * going into the system-wide sleep state we're resuming from.
	 */
	pm_request_idle(dev);
}
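
/*
 * Illustrative sketch, not part of the original file: pm_generic_prepare()
 * and pm_generic_complete() are normally installed as a pair bracketing
 * the callbacks above.  Besides forwarding to the driver's ->complete(),
 * pm_generic_complete() queues a runtime PM idle request through
 * pm_request_idle(), so a device that stayed unused across the sleep
 * transition can be runtime-suspended again after resume.  The name
 * "example_full_pm_ops" is hypothetical.  Guarded by "#if 0" so that it
 * is never compiled.
 */
#if 0
static const struct dev_pm_ops example_full_pm_ops = {
	.prepare = pm_generic_prepare,
	.suspend = pm_generic_suspend,
	.resume = pm_generic_resume,
	.complete = pm_generic_complete,
};
#endif
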
#endif /* CONFIG_PM_SLEEP */