xref: /openbmc/linux/drivers/base/power/wakeup.c (revision fb8d6c8d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/wakeup.c - System wakeup events framework
4  *
5  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <linux/pm_wakeirq.h>
18 #include <trace/events/power.h>
19 
20 #include "power.h"
21 
22 #ifndef CONFIG_SUSPEND
23 suspend_state_t pm_suspend_target_state;
24 #define pm_suspend_target_state	(PM_SUSPEND_ON)
25 #endif
26 
27 /*
28  * If set, the suspend/hibernate code will abort transitions to a sleep state
29  * if wakeup events are registered during or immediately before the transition.
30  */
31 bool events_check_enabled __read_mostly;
32 
33 /* First wakeup IRQ seen by the kernel in the last cycle. */
34 unsigned int pm_wakeup_irq __read_mostly;
35 
36 /* If greater than 0 and the system is suspending, terminate the suspend. */
37 static atomic_t pm_abort_suspend __read_mostly;
38 
39 /*
40  * Combined counters of registered wakeup events and wakeup events in progress.
41  * They need to be modified together atomically, so it's better to use one
42  * atomic variable to hold them both.
43  */
44 static atomic_t combined_event_count = ATOMIC_INIT(0);
45 
46 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
47 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
48 
49 static void split_counters(unsigned int *cnt, unsigned int *inpr)
50 {
51 	unsigned int comb = atomic_read(&combined_event_count);
52 
53 	*cnt = (comb >> IN_PROGRESS_BITS);
54 	*inpr = comb & MAX_IN_PROGRESS;
55 }
56 
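/*
 * Illustrative sketch of the packing used above (assuming a 32-bit int, so
 * IN_PROGRESS_BITS is 16): the low 16 bits of combined_event_count hold the
 * number of wakeup events currently in progress and the high 16 bits hold the
 * number of registered (completed) events:
 *
 *	comb = (cnt << IN_PROGRESS_BITS) | inpr;
 *
 * Activating a wakeup source therefore uses atomic_inc_return(), which adds 1
 * to the in-progress half, while deactivating one adds MAX_IN_PROGRESS
 * (0xffff), which in a single atomic operation subtracts 1 from the
 * in-progress half and, through the carry, adds 1 to the registered-events
 * half.
 */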
57 /* A preserved old value of the events counter. */
58 static unsigned int saved_count;
59 
60 static DEFINE_RAW_SPINLOCK(events_lock);
61 
62 static void pm_wakeup_timer_fn(struct timer_list *t);
63 
64 static LIST_HEAD(wakeup_sources);
65 
66 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
67 
68 DEFINE_STATIC_SRCU(wakeup_srcu);
69 
70 static struct wakeup_source deleted_ws = {
71 	.name = "deleted",
72 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
73 };
74 
75 static DEFINE_IDA(wakeup_ida);
76 
77 /**
78  * wakeup_source_create - Create a struct wakeup_source object.
79  * @name: Name of the new wakeup source.
80  */
81 struct wakeup_source *wakeup_source_create(const char *name)
82 {
83 	struct wakeup_source *ws;
84 	const char *ws_name;
85 	int id;
86 
87 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
88 	if (!ws)
89 		goto err_ws;
90 
91 	ws_name = kstrdup_const(name, GFP_KERNEL);
92 	if (!ws_name)
93 		goto err_name;
94 	ws->name = ws_name;
95 
96 	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
97 	if (id < 0)
98 		goto err_id;
99 	ws->id = id;
100 
101 	return ws;
102 
103 err_id:
104 	kfree_const(ws->name);
105 err_name:
106 	kfree(ws);
107 err_ws:
108 	return NULL;
109 }
110 EXPORT_SYMBOL_GPL(wakeup_source_create);
111 
112 /*
113  * Record the statistics of a wakeup_source being deleted into a dummy wakeup_source.
114  */
115 static void wakeup_source_record(struct wakeup_source *ws)
116 {
117 	unsigned long flags;
118 
119 	spin_lock_irqsave(&deleted_ws.lock, flags);
120 
121 	if (ws->event_count) {
122 		deleted_ws.total_time =
123 			ktime_add(deleted_ws.total_time, ws->total_time);
124 		deleted_ws.prevent_sleep_time =
125 			ktime_add(deleted_ws.prevent_sleep_time,
126 				  ws->prevent_sleep_time);
127 		deleted_ws.max_time =
128 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
129 				deleted_ws.max_time : ws->max_time;
130 		deleted_ws.event_count += ws->event_count;
131 		deleted_ws.active_count += ws->active_count;
132 		deleted_ws.relax_count += ws->relax_count;
133 		deleted_ws.expire_count += ws->expire_count;
134 		deleted_ws.wakeup_count += ws->wakeup_count;
135 	}
136 
137 	spin_unlock_irqrestore(&deleted_ws.lock, flags);
138 }
139 
140 static void wakeup_source_free(struct wakeup_source *ws)
141 {
142 	ida_free(&wakeup_ida, ws->id);
143 	kfree_const(ws->name);
144 	kfree(ws);
145 }
146 
147 /**
148  * wakeup_source_destroy - Destroy a struct wakeup_source object.
149  * @ws: Wakeup source to destroy.
150  *
151  * Use only for wakeup source objects created with wakeup_source_create().
152  */
153 void wakeup_source_destroy(struct wakeup_source *ws)
154 {
155 	if (!ws)
156 		return;
157 
158 	__pm_relax(ws);
159 	wakeup_source_record(ws);
160 	wakeup_source_free(ws);
161 }
162 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
163 
164 /**
165  * wakeup_source_add - Add given object to the list of wakeup sources.
166  * @ws: Wakeup source object to add to the list.
167  */
168 void wakeup_source_add(struct wakeup_source *ws)
169 {
170 	unsigned long flags;
171 
172 	if (WARN_ON(!ws))
173 		return;
174 
175 	spin_lock_init(&ws->lock);
176 	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
177 	ws->active = false;
178 
179 	raw_spin_lock_irqsave(&events_lock, flags);
180 	list_add_rcu(&ws->entry, &wakeup_sources);
181 	raw_spin_unlock_irqrestore(&events_lock, flags);
182 }
183 EXPORT_SYMBOL_GPL(wakeup_source_add);
184 
185 /**
186  * wakeup_source_remove - Remove given object from the wakeup sources list.
187  * @ws: Wakeup source object to remove from the list.
188  */
189 void wakeup_source_remove(struct wakeup_source *ws)
190 {
191 	unsigned long flags;
192 
193 	if (WARN_ON(!ws))
194 		return;
195 
196 	raw_spin_lock_irqsave(&events_lock, flags);
197 	list_del_rcu(&ws->entry);
198 	raw_spin_unlock_irqrestore(&events_lock, flags);
199 	synchronize_srcu(&wakeup_srcu);
200 
201 	del_timer_sync(&ws->timer);
202 	/*
203 	 * Clear timer.function to make wakeup_source_not_registered() treat
204 	 * this wakeup source as not registered.
205 	 */
206 	ws->timer.function = NULL;
207 }
208 EXPORT_SYMBOL_GPL(wakeup_source_remove);
209 
210 /**
211  * wakeup_source_register - Create wakeup source and add it to the list.
212  * @dev: Device this wakeup source is associated with (or NULL if virtual).
213  * @name: Name of the wakeup source to register.
214  */
215 struct wakeup_source *wakeup_source_register(struct device *dev,
216 					     const char *name)
217 {
218 	struct wakeup_source *ws;
219 	int ret;
220 
221 	ws = wakeup_source_create(name);
222 	if (ws) {
223 		if (!dev || device_is_registered(dev)) {
224 			ret = wakeup_source_sysfs_add(dev, ws);
225 			if (ret) {
226 				wakeup_source_free(ws);
227 				return NULL;
228 			}
229 		}
230 		wakeup_source_add(ws);
231 	}
232 	return ws;
233 }
234 EXPORT_SYMBOL_GPL(wakeup_source_register);
235 
236 /**
237  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
238  * @ws: Wakeup source object to unregister.
239  */
240 void wakeup_source_unregister(struct wakeup_source *ws)
241 {
242 	if (ws) {
243 		wakeup_source_remove(ws);
244 		wakeup_source_sysfs_remove(ws);
245 		wakeup_source_destroy(ws);
246 	}
247 }
248 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
249 
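/*
 * Illustrative sketch (hypothetical "foo" driver, all foo_* names are made
 * up): a typical lifetime of a virtual wakeup source created with
 * wakeup_source_register() and torn down with wakeup_source_unregister().
 *
 *	static struct wakeup_source *foo_ws;
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		foo_ws = wakeup_source_register(&pdev->dev, "foo-events");
 *		return foo_ws ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_handle_event(struct foo_data *foo)
 *	{
 *		__pm_stay_awake(foo_ws);
 *		queue_work(system_wq, &foo->work);
 *	}
 *
 *	static void foo_work_fn(struct work_struct *work)
 *	{
 *		(process the event, then let the system sleep again)
 *		__pm_relax(foo_ws);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		wakeup_source_unregister(foo_ws);
 *		return 0;
 *	}
 */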
250 /**
251  * device_wakeup_attach - Attach a wakeup source object to a device object.
252  * @dev: Device to handle.
253  * @ws: Wakeup source object to attach to @dev.
254  *
255  * This causes @dev to be treated as a wakeup device.
256  */
257 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
258 {
259 	spin_lock_irq(&dev->power.lock);
260 	if (dev->power.wakeup) {
261 		spin_unlock_irq(&dev->power.lock);
262 		return -EEXIST;
263 	}
264 	dev->power.wakeup = ws;
265 	if (dev->power.wakeirq)
266 		device_wakeup_attach_irq(dev, dev->power.wakeirq);
267 	spin_unlock_irq(&dev->power.lock);
268 	return 0;
269 }
270 
271 /**
272  * device_wakeup_enable - Enable given device to be a wakeup source.
273  * @dev: Device to handle.
274  *
275  * Create a wakeup source object, register it and attach it to @dev.
276  */
277 int device_wakeup_enable(struct device *dev)
278 {
279 	struct wakeup_source *ws;
280 	int ret;
281 
282 	if (!dev || !dev->power.can_wakeup)
283 		return -EINVAL;
284 
285 	if (pm_suspend_target_state != PM_SUSPEND_ON)
286 		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
287 
288 	ws = wakeup_source_register(dev, dev_name(dev));
289 	if (!ws)
290 		return -ENOMEM;
291 
292 	ret = device_wakeup_attach(dev, ws);
293 	if (ret)
294 		wakeup_source_unregister(ws);
295 
296 	return ret;
297 }
298 EXPORT_SYMBOL_GPL(device_wakeup_enable);
299 
300 /**
301  * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
302  * @dev: Device to handle
303  * @wakeirq: Device specific wakeirq entry
304  *
305  * Attach a device wakeirq to the wakeup source so the device
306  * wake IRQ can be configured automatically for suspend and
307  * resume.
308  *
309  * Call under the device's power.lock lock.
310  */
311 void device_wakeup_attach_irq(struct device *dev,
312 			     struct wake_irq *wakeirq)
313 {
314 	struct wakeup_source *ws;
315 
316 	ws = dev->power.wakeup;
317 	if (!ws)
318 		return;
319 
320 	if (ws->wakeirq)
321 		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
322 
323 	ws->wakeirq = wakeirq;
324 }
325 
326 /**
327  * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
328  * @dev: Device to handle
329  *
330  * Removes a device wakeirq from the wakeup source.
331  *
332  * Call under the device's power.lock lock.
333  */
334 void device_wakeup_detach_irq(struct device *dev)
335 {
336 	struct wakeup_source *ws;
337 
338 	ws = dev->power.wakeup;
339 	if (ws)
340 		ws->wakeirq = NULL;
341 }
342 
343 /**
344  * device_wakeup_arm_wake_irqs(void)
345  *
346  * Iterates over the list of device wakeirqs to arm them.
347  */
348 void device_wakeup_arm_wake_irqs(void)
349 {
350 	struct wakeup_source *ws;
351 	int srcuidx;
352 
353 	srcuidx = srcu_read_lock(&wakeup_srcu);
354 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
355 		dev_pm_arm_wake_irq(ws->wakeirq);
356 	srcu_read_unlock(&wakeup_srcu, srcuidx);
357 }
358 
359 /**
360  * device_wakeup_disarm_wake_irqs(void)
361  *
362  * Iterates over the list of device wakeirqs to disarm them.
363  */
364 void device_wakeup_disarm_wake_irqs(void)
365 {
366 	struct wakeup_source *ws;
367 	int srcuidx;
368 
369 	srcuidx = srcu_read_lock(&wakeup_srcu);
370 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
371 		dev_pm_disarm_wake_irq(ws->wakeirq);
372 	srcu_read_unlock(&wakeup_srcu, srcuidx);
373 }
374 
375 /**
376  * device_wakeup_detach - Detach a device's wakeup source object from it.
377  * @dev: Device to detach the wakeup source object from.
378  *
379  * After it returns, @dev will not be treated as a wakeup device any more.
380  */
381 static struct wakeup_source *device_wakeup_detach(struct device *dev)
382 {
383 	struct wakeup_source *ws;
384 
385 	spin_lock_irq(&dev->power.lock);
386 	ws = dev->power.wakeup;
387 	dev->power.wakeup = NULL;
388 	spin_unlock_irq(&dev->power.lock);
389 	return ws;
390 }
391 
392 /**
393  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
394  * @dev: Device to handle.
395  *
396  * Detach the @dev's wakeup source object from it, unregister this wakeup source
397  * object and destroy it.
398  */
399 int device_wakeup_disable(struct device *dev)
400 {
401 	struct wakeup_source *ws;
402 
403 	if (!dev || !dev->power.can_wakeup)
404 		return -EINVAL;
405 
406 	ws = device_wakeup_detach(dev);
407 	wakeup_source_unregister(ws);
408 	return 0;
409 }
410 EXPORT_SYMBOL_GPL(device_wakeup_disable);
411 
412 /**
413  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
414  * @dev: Device to handle.
415  * @capable: Whether or not @dev is capable of waking up the system from sleep.
416  *
417  * If @capable is set, set the @dev's power.can_wakeup flag and add its
418  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
419  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
420  *
421  * This function may sleep and it can't be called from any context where
422  * sleeping is not allowed.
423  */
424 void device_set_wakeup_capable(struct device *dev, bool capable)
425 {
426 	if (!!dev->power.can_wakeup == !!capable)
427 		return;
428 
429 	dev->power.can_wakeup = capable;
430 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
431 		if (capable) {
432 			int ret = wakeup_sysfs_add(dev);
433 
434 			if (ret)
435 				dev_info(dev, "Wakeup sysfs attributes not added\n");
436 		} else {
437 			wakeup_sysfs_remove(dev);
438 		}
439 	}
440 }
441 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
442 
443 /**
444  * device_init_wakeup - Device wakeup initialization.
445  * @dev: Device to handle.
446  * @enable: Whether or not to enable @dev as a wakeup device.
447  *
448  * By default, most devices should leave wakeup disabled.  The exceptions are
449  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
450  * possibly network interfaces, etc.  Also, devices that don't generate their
451  * own wakeup requests but merely forward requests from one bus to another
452  * (like PCI bridges) should have wakeup enabled by default.
453  */
454 int device_init_wakeup(struct device *dev, bool enable)
455 {
456 	int ret = 0;
457 
458 	if (!dev)
459 		return -EINVAL;
460 
461 	if (enable) {
462 		device_set_wakeup_capable(dev, true);
463 		ret = device_wakeup_enable(dev);
464 	} else {
465 		device_wakeup_disable(dev);
466 		device_set_wakeup_capable(dev, false);
467 	}
468 
469 	return ret;
470 }
471 EXPORT_SYMBOL_GPL(device_init_wakeup);
472 
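/*
 * Illustrative sketch (hypothetical probe code, not a real in-tree user): a
 * device that should merely be wakeup-capable, leaving the decision to user
 * space, marks itself capable only, while a power-button style device enables
 * wakeup outright:
 *
 *	device_set_wakeup_capable(&pdev->dev, true);	(opt-in devices)
 *	device_init_wakeup(&pdev->dev, true);		(always-on wakeup devices)
 *
 * and the latter is undone with device_init_wakeup(&pdev->dev, false) on
 * removal.
 */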
473 /**
474  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
475  * @dev: Device to handle.
476  */
477 int device_set_wakeup_enable(struct device *dev, bool enable)
478 {
479 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
480 }
481 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
482 
483 /**
484  * wakeup_source_not_registered - validate the given wakeup source.
485  * @ws: Wakeup source to be validated.
486  */
487 static bool wakeup_source_not_registered(struct wakeup_source *ws)
488 {
489 	/*
490 	 * Use timer struct to check if the given source is initialized
491 	 * by wakeup_source_add.
492 	 */
493 	return ws->timer.function != pm_wakeup_timer_fn;
494 }
495 
496 /*
497  * The functions below use the observation that each wakeup event starts a
498  * period in which the system should not be suspended.  The moment this period
499  * will end depends on how the wakeup event is going to be processed after being
500  * detected and all of the possible cases can be divided into two distinct
501  * groups.
502  *
503  * First, a wakeup event may be detected by the same functional unit that will
504  * carry out the entire processing of it and possibly will pass it to user space
505  * for further processing.  In that case the functional unit that has detected
506  * the event may later "close" the "no suspend" period associated with it
507  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
508  * pm_relax(), balanced with each other, is supposed to be used in such
509  * situations.
510  *
511  * Second, a wakeup event may be detected by one functional unit and processed
512  * by another one.  In that case the unit that has detected it cannot really
513  * "close" the "no suspend" period associated with it, unless it knows in
514  * advance what's going to happen to the event during processing.  This
515  * knowledge, however, may not be available to it, so it can simply specify a time
516  * to wait before the system can be suspended and pass it as the second
517  * argument of pm_wakeup_event().
518  *
519  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
520  * "no suspend" period will be ended either by the pm_relax(), or by the timer
521  * function executed when the timer expires, whichever comes first.
522  */
523 
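/*
 * Illustrative sketch of the two cases above (hypothetical handlers, made-up
 * names).  First case, the detecting code also finishes the processing:
 *
 *	pm_stay_awake(dev);
 *	(handle the event completely)
 *	pm_relax(dev);
 *
 * Second case, the event is handed off and the detecting code only knows an
 * upper bound on the processing time, say 100 ms:
 *
 *	pm_wakeup_event(dev, 100);
 */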
524 /**
525  * wakeup_source_activate - Mark given wakeup source as active.
526  * @ws: Wakeup source to handle.
527  *
528  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
529  * core of the event by incrementing the counter of wakeup events being
530  * processed.
531  */
532 static void wakeup_source_activate(struct wakeup_source *ws)
533 {
534 	unsigned int cec;
535 
536 	if (WARN_ONCE(wakeup_source_not_registered(ws),
537 			"unregistered wakeup source\n"))
538 		return;
539 
540 	ws->active = true;
541 	ws->active_count++;
542 	ws->last_time = ktime_get();
543 	if (ws->autosleep_enabled)
544 		ws->start_prevent_time = ws->last_time;
545 
546 	/* Increment the counter of events in progress. */
547 	cec = atomic_inc_return(&combined_event_count);
548 
549 	trace_wakeup_source_activate(ws->name, cec);
550 }
551 
552 /**
553  * wakeup_source_report_event - Report wakeup event using the given source.
554  * @ws: Wakeup source to report the event for.
555  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
556  */
557 static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
558 {
559 	ws->event_count++;
560 	/* This is racy, but the counter is approximate anyway. */
561 	if (events_check_enabled)
562 		ws->wakeup_count++;
563 
564 	if (!ws->active)
565 		wakeup_source_activate(ws);
566 
567 	if (hard)
568 		pm_system_wakeup();
569 }
570 
571 /**
572  * __pm_stay_awake - Notify the PM core of a wakeup event.
573  * @ws: Wakeup source object associated with the source of the event.
574  *
575  * It is safe to call this function from interrupt context.
576  */
577 void __pm_stay_awake(struct wakeup_source *ws)
578 {
579 	unsigned long flags;
580 
581 	if (!ws)
582 		return;
583 
584 	spin_lock_irqsave(&ws->lock, flags);
585 
586 	wakeup_source_report_event(ws, false);
587 	del_timer(&ws->timer);
588 	ws->timer_expires = 0;
589 
590 	spin_unlock_irqrestore(&ws->lock, flags);
591 }
592 EXPORT_SYMBOL_GPL(__pm_stay_awake);
593 
594 /**
595  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
596  * @dev: Device the wakeup event is related to.
597  *
598  * Notify the PM core of a wakeup event (signaled by @dev) by calling
599  * __pm_stay_awake for the @dev's wakeup source object.
600  *
601  * Call this function after detecting a wakeup event if pm_relax() is going
602  * to be called directly after processing the event (and possibly passing it to
603  * user space for further processing).
604  */
605 void pm_stay_awake(struct device *dev)
606 {
607 	unsigned long flags;
608 
609 	if (!dev)
610 		return;
611 
612 	spin_lock_irqsave(&dev->power.lock, flags);
613 	__pm_stay_awake(dev->power.wakeup);
614 	spin_unlock_irqrestore(&dev->power.lock, flags);
615 }
616 EXPORT_SYMBOL_GPL(pm_stay_awake);
617 
618 #ifdef CONFIG_PM_AUTOSLEEP
619 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
620 {
621 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
622 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
623 }
624 #else
625 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
626 					     ktime_t now) {}
627 #endif
628 
629 /**
630  * wakeup_source_deactivate - Mark given wakeup source as inactive.
631  * @ws: Wakeup source to handle.
632  *
633  * Update the @ws' statistics and notify the PM core that the wakeup source has
634  * become inactive by decrementing the counter of wakeup events being processed
635  * and incrementing the counter of registered wakeup events.
636  */
637 static void wakeup_source_deactivate(struct wakeup_source *ws)
638 {
639 	unsigned int cnt, inpr, cec;
640 	ktime_t duration;
641 	ktime_t now;
642 
643 	ws->relax_count++;
644 	/*
645 	 * __pm_relax() may be called directly or from a timer function.
646 	 * If it is called directly right after the timer function has been
647 	 * started, but before the timer function calls __pm_relax(), it is
648 	 * possible that __pm_stay_awake() will be called in the meantime and
649 	 * will set ws->active.  Then, ws->active may be cleared immediately
650 	 * by the __pm_relax() called from the timer function, but in such a
651 	 * case ws->relax_count will be different from ws->active_count.
652 	 */
653 	if (ws->relax_count != ws->active_count) {
654 		ws->relax_count--;
655 		return;
656 	}
657 
658 	ws->active = false;
659 
660 	now = ktime_get();
661 	duration = ktime_sub(now, ws->last_time);
662 	ws->total_time = ktime_add(ws->total_time, duration);
663 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
664 		ws->max_time = duration;
665 
666 	ws->last_time = now;
667 	del_timer(&ws->timer);
668 	ws->timer_expires = 0;
669 
670 	if (ws->autosleep_enabled)
671 		update_prevent_sleep_time(ws, now);
672 
673 	/*
674 	 * Increment the counter of registered wakeup events and decrement the
675  * counter of wakeup events in progress simultaneously.
676 	 */
677 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
678 	trace_wakeup_source_deactivate(ws->name, cec);
679 
680 	split_counters(&cnt, &inpr);
681 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
682 		wake_up(&wakeup_count_wait_queue);
683 }
684 
685 /**
686  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
687  * @ws: Wakeup source object associated with the source of the event.
688  *
689  * Call this function for wakeup events whose processing started with calling
690  * __pm_stay_awake().
691  *
692  * It is safe to call it from interrupt context.
693  */
694 void __pm_relax(struct wakeup_source *ws)
695 {
696 	unsigned long flags;
697 
698 	if (!ws)
699 		return;
700 
701 	spin_lock_irqsave(&ws->lock, flags);
702 	if (ws->active)
703 		wakeup_source_deactivate(ws);
704 	spin_unlock_irqrestore(&ws->lock, flags);
705 }
706 EXPORT_SYMBOL_GPL(__pm_relax);
707 
708 /**
709  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
710  * @dev: Device that signaled the event.
711  *
712  * Execute __pm_relax() for the @dev's wakeup source object.
713  */
714 void pm_relax(struct device *dev)
715 {
716 	unsigned long flags;
717 
718 	if (!dev)
719 		return;
720 
721 	spin_lock_irqsave(&dev->power.lock, flags);
722 	__pm_relax(dev->power.wakeup);
723 	spin_unlock_irqrestore(&dev->power.lock, flags);
724 }
725 EXPORT_SYMBOL_GPL(pm_relax);
726 
727 /**
728  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
729  * @t: Timer embedded in the wakeup source object associated with the event source.
730  *
731  * Call wakeup_source_deactivate() for the wakeup source containing the timer @t
732  * if it is currently active, its timer has not been canceled, and the
733  * expiration time of the timer is not in the future.
734  */
735 static void pm_wakeup_timer_fn(struct timer_list *t)
736 {
737 	struct wakeup_source *ws = from_timer(ws, t, timer);
738 	unsigned long flags;
739 
740 	spin_lock_irqsave(&ws->lock, flags);
741 
742 	if (ws->active && ws->timer_expires
743 	    && time_after_eq(jiffies, ws->timer_expires)) {
744 		wakeup_source_deactivate(ws);
745 		ws->expire_count++;
746 	}
747 
748 	spin_unlock_irqrestore(&ws->lock, flags);
749 }
750 
751 /**
752  * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
753  * @ws: Wakeup source object associated with the event source.
754  * @msec: Anticipated event processing time (in milliseconds).
755  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
756  *
757  * Notify the PM core of a wakeup event whose source is @ws that will take
758  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
759  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
760  * execute pm_wakeup_timer_fn() in the future.
761  *
762  * It is safe to call this function from interrupt context.
763  */
764 void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
765 {
766 	unsigned long flags;
767 	unsigned long expires;
768 
769 	if (!ws)
770 		return;
771 
772 	spin_lock_irqsave(&ws->lock, flags);
773 
774 	wakeup_source_report_event(ws, hard);
775 
776 	if (!msec) {
777 		wakeup_source_deactivate(ws);
778 		goto unlock;
779 	}
780 
781 	expires = jiffies + msecs_to_jiffies(msec);
782 	if (!expires)
783 		expires = 1;
784 
785 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
786 		mod_timer(&ws->timer, expires);
787 		ws->timer_expires = expires;
788 	}
789 
790  unlock:
791 	spin_unlock_irqrestore(&ws->lock, flags);
792 }
793 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
794 
795 /**
796  * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
797  * @dev: Device the wakeup event is related to.
798  * @msec: Anticipated event processing time (in milliseconds).
799  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
800  *
801  * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
802  */
803 void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
804 {
805 	unsigned long flags;
806 
807 	if (!dev)
808 		return;
809 
810 	spin_lock_irqsave(&dev->power.lock, flags);
811 	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
812 	spin_unlock_irqrestore(&dev->power.lock, flags);
813 }
814 EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
815 
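/*
 * Illustrative sketch (hypothetical wakeup interrupt handler, made-up names):
 * a driver that detects a wakeup event but hands it off to another layer can
 * report it with a grace period instead of pairing pm_stay_awake() and
 * pm_relax():
 *
 *	static irqreturn_t bar_wake_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_wakeup_dev_event(dev, 200, false);
 *		return IRQ_HANDLED;
 *	}
 *
 * Passing hard == true would additionally abort a suspend in progress and
 * wake the system from suspend-to-idle, as pm_system_wakeup() does.
 */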
816 void pm_print_active_wakeup_sources(void)
817 {
818 	struct wakeup_source *ws;
819 	int srcuidx, active = 0;
820 	struct wakeup_source *last_activity_ws = NULL;
821 
822 	srcuidx = srcu_read_lock(&wakeup_srcu);
823 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
824 		if (ws->active) {
825 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
826 			active = 1;
827 		} else if (!active &&
828 			   (!last_activity_ws ||
829 			    ktime_to_ns(ws->last_time) >
830 			    ktime_to_ns(last_activity_ws->last_time))) {
831 			last_activity_ws = ws;
832 		}
833 	}
834 
835 	if (!active && last_activity_ws)
836 		pm_pr_dbg("last active wakeup source: %s\n",
837 			last_activity_ws->name);
838 	srcu_read_unlock(&wakeup_srcu, srcuidx);
839 }
840 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
841 
842 /**
843  * pm_wakeup_pending - Check if power transition in progress should be aborted.
844  *
845  * Compare the current number of registered wakeup events with its preserved
846  * value from the past and return true if new wakeup events have been registered
847  * since the old value was stored.  Also return true if the current number of
848  * wakeup events being processed is different from zero.
849  */
850 bool pm_wakeup_pending(void)
851 {
852 	unsigned long flags;
853 	bool ret = false;
854 
855 	raw_spin_lock_irqsave(&events_lock, flags);
856 	if (events_check_enabled) {
857 		unsigned int cnt, inpr;
858 
859 		split_counters(&cnt, &inpr);
860 		ret = (cnt != saved_count || inpr > 0);
861 		events_check_enabled = !ret;
862 	}
863 	raw_spin_unlock_irqrestore(&events_lock, flags);
864 
865 	if (ret) {
866 		pm_pr_dbg("Wakeup pending, aborting suspend\n");
867 		pm_print_active_wakeup_sources();
868 	}
869 
870 	return ret || atomic_read(&pm_abort_suspend) > 0;
871 }
872 
873 void pm_system_wakeup(void)
874 {
875 	atomic_inc(&pm_abort_suspend);
876 	s2idle_wake();
877 }
878 EXPORT_SYMBOL_GPL(pm_system_wakeup);
879 
880 void pm_system_cancel_wakeup(void)
881 {
882 	atomic_dec_if_positive(&pm_abort_suspend);
883 }
884 
885 void pm_wakeup_clear(bool reset)
886 {
887 	pm_wakeup_irq = 0;
888 	if (reset)
889 		atomic_set(&pm_abort_suspend, 0);
890 }
891 
892 void pm_system_irq_wakeup(unsigned int irq_number)
893 {
894 	if (pm_wakeup_irq == 0) {
895 		pm_wakeup_irq = irq_number;
896 		pm_system_wakeup();
897 	}
898 }
899 
900 /**
901  * pm_get_wakeup_count - Read the number of registered wakeup events.
902  * @count: Address to store the value at.
903  * @block: Whether or not to block.
904  *
905  * Store the number of registered wakeup events at the address in @count.  If
906  * @block is set, block until the current number of wakeup events being
907  * processed is zero.
908  *
909  * Return 'false' if the current number of wakeup events being processed is
910  * nonzero.  Otherwise return 'true'.
911  */
912 bool pm_get_wakeup_count(unsigned int *count, bool block)
913 {
914 	unsigned int cnt, inpr;
915 
916 	if (block) {
917 		DEFINE_WAIT(wait);
918 
919 		for (;;) {
920 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
921 					TASK_INTERRUPTIBLE);
922 			split_counters(&cnt, &inpr);
923 			if (inpr == 0 || signal_pending(current))
924 				break;
925 			pm_print_active_wakeup_sources();
926 			schedule();
927 		}
928 		finish_wait(&wakeup_count_wait_queue, &wait);
929 	}
930 
931 	split_counters(&cnt, &inpr);
932 	*count = cnt;
933 	return !inpr;
934 }
935 
936 /**
937  * pm_save_wakeup_count - Save the current number of registered wakeup events.
938  * @count: Value to compare with the current number of registered wakeup events.
939  *
940  * If @count is equal to the current number of registered wakeup events and the
941  * current number of wakeup events being processed is zero, store @count as the
942  * old number of registered wakeup events for pm_wakeup_pending(), enable
943  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
944  * detection and return 'false'.
945  */
946 bool pm_save_wakeup_count(unsigned int count)
947 {
948 	unsigned int cnt, inpr;
949 	unsigned long flags;
950 
951 	events_check_enabled = false;
952 	raw_spin_lock_irqsave(&events_lock, flags);
953 	split_counters(&cnt, &inpr);
954 	if (cnt == count && inpr == 0) {
955 		saved_count = count;
956 		events_check_enabled = true;
957 	}
958 	raw_spin_unlock_irqrestore(&events_lock, flags);
959 	return events_check_enabled;
960 }
961 
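/*
 * Illustrative note: pm_get_wakeup_count() and pm_save_wakeup_count() back the
 * /sys/power/wakeup_count interface.  A user space suspend sequence is
 * expected to look roughly like this (shell shown only for illustration):
 *
 *	count=$(cat /sys/power/wakeup_count)		(may block until no
 *							 events are in progress)
 *	echo "$count" > /sys/power/wakeup_count		(pm_save_wakeup_count())
 *	echo mem > /sys/power/state			(aborted by
 *							 pm_wakeup_pending() if new
 *							 events were registered)
 */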
962 #ifdef CONFIG_PM_AUTOSLEEP
963 /**
964  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
965  * @set: Whether to set or to clear the autosleep_enabled flags.
966  */
967 void pm_wakep_autosleep_enabled(bool set)
968 {
969 	struct wakeup_source *ws;
970 	ktime_t now = ktime_get();
971 	int srcuidx;
972 
973 	srcuidx = srcu_read_lock(&wakeup_srcu);
974 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
975 		spin_lock_irq(&ws->lock);
976 		if (ws->autosleep_enabled != set) {
977 			ws->autosleep_enabled = set;
978 			if (ws->active) {
979 				if (set)
980 					ws->start_prevent_time = now;
981 				else
982 					update_prevent_sleep_time(ws, now);
983 			}
984 		}
985 		spin_unlock_irq(&ws->lock);
986 	}
987 	srcu_read_unlock(&wakeup_srcu, srcuidx);
988 }
989 #endif /* CONFIG_PM_AUTOSLEEP */
990 
991 /**
992  * print_wakeup_source_stats - Print wakeup source statistics information.
993  * @m: seq_file to print the statistics into.
994  * @ws: Wakeup source object to print the statistics for.
995  */
996 static int print_wakeup_source_stats(struct seq_file *m,
997 				     struct wakeup_source *ws)
998 {
999 	unsigned long flags;
1000 	ktime_t total_time;
1001 	ktime_t max_time;
1002 	unsigned long active_count;
1003 	ktime_t active_time;
1004 	ktime_t prevent_sleep_time;
1005 
1006 	spin_lock_irqsave(&ws->lock, flags);
1007 
1008 	total_time = ws->total_time;
1009 	max_time = ws->max_time;
1010 	prevent_sleep_time = ws->prevent_sleep_time;
1011 	active_count = ws->active_count;
1012 	if (ws->active) {
1013 		ktime_t now = ktime_get();
1014 
1015 		active_time = ktime_sub(now, ws->last_time);
1016 		total_time = ktime_add(total_time, active_time);
1017 		if (active_time > max_time)
1018 			max_time = active_time;
1019 
1020 		if (ws->autosleep_enabled)
1021 			prevent_sleep_time = ktime_add(prevent_sleep_time,
1022 				ktime_sub(now, ws->start_prevent_time));
1023 	} else {
1024 		active_time = 0;
1025 	}
1026 
1027 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1028 		   ws->name, active_count, ws->event_count,
1029 		   ws->wakeup_count, ws->expire_count,
1030 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1031 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1032 		   ktime_to_ms(prevent_sleep_time));
1033 
1034 	spin_unlock_irqrestore(&ws->lock, flags);
1035 
1036 	return 0;
1037 }
1038 
1039 static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1040 					loff_t *pos)
1041 {
1042 	struct wakeup_source *ws;
1043 	loff_t n = *pos;
1044 	int *srcuidx = m->private;
1045 
1046 	if (n == 0) {
1047 		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1048 			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1049 			"last_change\tprevent_suspend_time\n");
1050 	}
1051 
1052 	*srcuidx = srcu_read_lock(&wakeup_srcu);
1053 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
1054 		if (n-- <= 0)
1055 			return ws;
1056 	}
1057 
1058 	return NULL;
1059 }
1060 
1061 static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1062 					void *v, loff_t *pos)
1063 {
1064 	struct wakeup_source *ws = v;
1065 	struct wakeup_source *next_ws = NULL;
1066 
1067 	++(*pos);
1068 
1069 	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1070 		next_ws = ws;
1071 		break;
1072 	}
1073 
1074 	return next_ws;
1075 }
1076 
1077 static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1078 {
1079 	int *srcuidx = m->private;
1080 
1081 	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1082 }
1083 
1084 /**
1085  * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1086  * @m: seq_file to print the statistics into.
1087  * @v: wakeup_source of each iteration
1088  */
1089 static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1090 {
1091 	struct wakeup_source *ws = v;
1092 
1093 	print_wakeup_source_stats(m, ws);
1094 
1095 	return 0;
1096 }
1097 
1098 static const struct seq_operations wakeup_sources_stats_seq_ops = {
1099 	.start = wakeup_sources_stats_seq_start,
1100 	.next  = wakeup_sources_stats_seq_next,
1101 	.stop  = wakeup_sources_stats_seq_stop,
1102 	.show  = wakeup_sources_stats_seq_show,
1103 };
1104 
1105 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1106 {
1107 	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1108 }
1109 
1110 static const struct file_operations wakeup_sources_stats_fops = {
1111 	.owner = THIS_MODULE,
1112 	.open = wakeup_sources_stats_open,
1113 	.read = seq_read,
1114 	.llseek = seq_lseek,
1115 	.release = seq_release_private,
1116 };
1117 
1118 static int __init wakeup_sources_debugfs_init(void)
1119 {
1120 	debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
1121 			    &wakeup_sources_stats_fops);
1122 	return 0;
1123 }
1124 
1125 postcore_initcall(wakeup_sources_debugfs_init);
1126