// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @wirq: Wake irq specific data
 *
 * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
 */
static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
	unsigned long flags;

	if (!dev || !wirq)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
			  "wake irq already initialized\n")) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return -EEXIST;
	}

	dev->power.wakeirq = wirq;
	device_wakeup_attach_irq(dev, wirq);

	spin_unlock_irqrestore(&dev->power.lock, flags);
	return 0;
}

/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->dev = dev;
	wirq->irq = irq;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
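
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * platform driver foo_probe() wiring up its IO interrupt as a wake IRQ.
 * The IRQ number is assumed to come from platform_get_irq():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq, err;
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		// normal request_irq()/devm_request_irq() IO handling goes here
 *
 *		device_init_wakeup(&pdev->dev, true);
 *		err = dev_pm_set_wake_irq(&pdev->dev, irq);
 *		if (err)
 *			return err;
 *
 *		return 0;
 *	}
 */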

/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() as not all driver instances may have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
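
/*
 * Example (illustrative sketch only): the matching hypothetical foo_remove()
 * can clear the wake IRQ unconditionally, whether or not probe set one up:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_clear_wake_irq(&pdev->dev);
 *		device_init_wakeup(&pdev->dev, false);
 *
 *		return 0;
 *	}
 */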

/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
 * specific pm_runtime functions to wake the device, and then it's
 * up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device's pm_runtime_resume() can deal with the
 * situation.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* Maybe abort suspend? */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}

static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * so we use a threaded irq.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
				   wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}

/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 *
 * The interrupt starts disabled, and needs to be managed for
 * the device by the bus code or the device driver using
 * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
 * functions.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
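
/*
 * Example (illustrative sketch only): a hypothetical foo_probe() requesting
 * a second, dedicated wake-up interrupt. The index 1 for the wake IRQ and
 * the use of platform_get_irq_optional() are assumptions for illustration:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int wakeirq, err;
 *
 *		wakeirq = platform_get_irq_optional(pdev, 1);
 *		if (wakeirq > 0) {
 *			device_init_wakeup(&pdev->dev, true);
 *			err = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
 *			if (err)
 *				return err;
 *		}
 *
 *		pm_runtime_enable(&pdev->dev);
 *
 *		return 0;
 *	}
 */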

/**
 * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
 *                                         with reverse enable ordering
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has a dedicated
 * wake-up interrupt in addition to the device IO interrupt. It sets
 * the WAKE_IRQ_DEDICATED_REVERSE status flag to tell rpm_suspend()
 * to enable the dedicated wake-up interrupt after running the runtime
 * suspend callback for @dev.
 *
 * The interrupt starts disabled, and needs to be managed for
 * the device by the bus code or the device driver using
 * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
 * functions.
 */
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
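
/*
 * Example (illustrative sketch only): the probe above could instead use the
 * reverse variant for hardware where the dedicated wake IRQ must only be
 * enabled after the device's own ->runtime_suspend() callback has completed:
 *
 *	err = dev_pm_set_dedicated_wake_irq_reverse(&pdev->dev, wakeirq);
 */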

/**
 * dev_pm_enable_wake_irq - Enable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_resume() to override the PM runtime core managed wake-up
 * interrupt handling to enable the wake-up interrupt.
 *
 * Note that for runtime_suspend() the wake-up interrupts
 * should be unconditionally enabled unlike for suspend()
 * that is conditional.
 */
void dev_pm_enable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);

/**
 * dev_pm_disable_wake_irq - Disable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_suspend() to override the PM runtime core managed wake-up
 * interrupt handling to disable the wake-up interrupt.
 */
void dev_pm_disable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);

/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables the wakeirq conditionally. We need to enable the wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in the RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
 * starts disabled with IRQ_NOAUTOEN set.
 *
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 * The caller must hold &dev->power.lock to change wirq->status.
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		goto enable;
	} else if (can_change_status) {
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
		goto enable;
	}

	return;

enable:
	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
		enable_irq(wirq->irq);
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
 *
 * Disables the wake-up interrupt conditionally based on status.
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 */
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
		disable_irq_nosync(wirq->irq);
}

/**
 * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
 * @dev: Device using the wake IRQ
 *
 * Enable the wake IRQ conditionally based on status, mainly used when we
 * want to enable the wake IRQ after running ->runtime_suspend(), which
 * depends on WAKE_IRQ_DEDICATED_REVERSE.
 *
 * Should only be called from the rpm_suspend() path.
 */
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
		enable_irq(wirq->irq);
}
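
/*
 * Illustrative sketch (an assumption based on the comments in this file,
 * not a quote of the PM core): for a wake IRQ registered with
 * dev_pm_set_dedicated_wake_irq_reverse(), rpm_suspend() is expected to
 * call the helpers roughly in this order:
 *
 *	dev_pm_enable_wake_irq_check(dev, true);	// no enable_irq() yet for _REVERSE
 *	ret = callback(dev);				// driver ->runtime_suspend()
 *	if (!ret)
 *		dev_pm_enable_wake_irq_complete(dev);	// wake IRQ enabled only now
 *	else
 *		dev_pm_disable_wake_irq_check(dev, true);
 */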

/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			enable_irq(wirq->irq);

		enable_irq_wake(wirq->irq);
	}
}

/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		disable_irq_wake(wirq->irq);

		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			disable_irq_nosync(wirq->irq);
	}
}