xref: /openbmc/linux/drivers/base/power/wakeirq.c (revision 185c8f33)
// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @wirq: Wake irq specific data
 *
 * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
 */
static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
	unsigned long flags;

	if (!dev || !wirq)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
			  "wake irq already initialized\n")) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return -EEXIST;
	}

	dev->power.wakeirq = wirq;
	device_wakeup_attach_irq(dev, wirq);

	spin_unlock_irqrestore(&dev->power.lock, flags);
	return 0;
}

/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->dev = dev;
	wirq->irq = irq;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);

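/*
 * Example usage (an illustrative sketch, not taken from an in-tree driver):
 * a hypothetical platform driver marks its device as wakeup capable and
 * then reuses its IO interrupt as the wake IRQ during probe. The function
 * name foo_probe and the IRQ index are assumptions made for illustration.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		int irq, err;
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		// Declare wakeup capability before attaching the wake IRQ
 *		err = device_init_wakeup(dev, true);
 *		if (err)
 *			return err;
 *
 *		// Attach the IO interrupt; arming follows the sysfs wakeup entry
 *		err = dev_pm_set_wake_irq(dev, irq);
 *		if (err)
 *			device_init_wakeup(dev, false);
 *
 *		return err;
 *	}
 */
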
/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() as not all driver instances may have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);

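/*
 * Example usage (an illustrative sketch with hypothetical names): the wake
 * IRQ is detached on the remove path; the call is safe even for driver
 * instances that never attached one.
 *
 *	static void foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_clear_wake_irq(&pdev->dev);
 *		device_init_wakeup(&pdev->dev, false);
 *	}
 */
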
/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
 * specific pm_runtime functions to wake the device, and then it's
 * up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device's pm_runtime_resume() can deal with the
 * situation.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* Maybe abort suspend? */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}

static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * so we use a threaded irq.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
				   wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}

/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);

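/*
 * Example usage (an illustrative sketch with hypothetical names): a device
 * whose second interrupt is a dedicated wake-up line. The IRQ index and the
 * probe function are assumptions for illustration only.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		int wakeirq, err;
 *
 *		err = device_init_wakeup(dev, true);
 *		if (err)
 *			return err;
 *
 *		// Dedicated wake-up line, separate from the IO interrupt
 *		wakeirq = platform_get_irq(pdev, 1);
 *		if (wakeirq < 0)
 *			return wakeirq;
 *
 *		return dev_pm_set_dedicated_wake_irq(dev, wakeirq);
 *	}
 */
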
/**
 * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
 *                                         with reverse enable ordering
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has a dedicated
 * wake-up interrupt in addition to the device IO interrupt. It sets
 * the WAKE_IRQ_DEDICATED_REVERSE status bit to tell rpm_suspend()
 * to enable the dedicated wake-up interrupt after running the runtime
 * suspend callback for @dev.
 */
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);

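/*
 * Example usage (an illustrative sketch): the call pattern is the same as
 * for dev_pm_set_dedicated_wake_irq() above; the reverse variant is for
 * hardware whose wake-up interrupt may only be enabled once the device
 * itself has been runtime suspended.
 *
 *	err = dev_pm_set_dedicated_wake_irq_reverse(dev, wakeirq);
 */
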
/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables wakeirq conditionally. We need to enable the wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable an already disabled wakeirq. The wake-up
 * interrupt starts disabled with IRQ_NOAUTOEN set.
 *
 * Should be only called from rpm_suspend() and rpm_resume() path.
 * Caller must hold &dev->power.lock to change wirq->status.
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		goto enable;
	} else if (can_change_status) {
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
		goto enable;
	}

	return;

enable:
	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
		enable_irq(wirq->irq);
		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
	}
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
 *
 * Disables wake-up interrupt conditionally based on status.
 * Should be only called from rpm_suspend() and rpm_resume() path.
 */
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
		wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
		disable_irq_nosync(wirq->irq);
	}
}

/**
 * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
 * @dev: Device using the wake IRQ
 *
 * Enable wake IRQ conditionally based on status, mainly used when the
 * wake IRQ is to be enabled after running ->runtime_suspend(), which
 * depends on WAKE_IRQ_DEDICATED_REVERSE.
 *
 * Should be only called from rpm_suspend() path.
 */
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
		enable_irq(wirq->irq);
}

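/*
 * Conceptual sketch (simplified, not literal PM core code) of how the
 * helpers above are meant to be used by the runtime PM core in
 * drivers/base/power/runtime.c; error handling and locking are omitted:
 *
 *	// runtime suspend path
 *	dev_pm_enable_wake_irq_check(dev, true);	// enable unless REVERSE
 *	ret = callback(dev);				// ->runtime_suspend()
 *	if (ret)
 *		dev_pm_disable_wake_irq_check(dev, true);
 *	else
 *		dev_pm_enable_wake_irq_complete(dev);	// enable REVERSE wakeirqs
 *
 *	// runtime resume path
 *	dev_pm_disable_wake_irq_check(dev, false);
 *	callback(dev);					// ->runtime_resume()
 */
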
/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
			enable_irq(wirq->irq);

		enable_irq_wake(wirq->irq);
	}
}

/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		disable_irq_wake(wirq->irq);

		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
			disable_irq_nosync(wirq->irq);
	}
}