xref: /openbmc/linux/drivers/base/power/wakeup.c (revision 3d3337de)
1 /*
2  * drivers/base/power/wakeup.c - System wakeup events framework
3  *
4  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <trace/events/power.h>
18 
19 #include "power.h"
20 
21 /*
22  * If set, the suspend/hibernate code will abort transitions to a sleep state
23  * if wakeup events are registered during or immediately before the transition.
24  */
25 bool events_check_enabled __read_mostly;
26 
27 /* If set and the system is suspending, terminate the suspend. */
28 static bool pm_abort_suspend __read_mostly;
29 
30 /*
31  * Combined counters of registered wakeup events and wakeup events in progress.
32  * They need to be modified together atomically, so it's better to use one
33  * atomic variable to hold them both.
34  */
35 static atomic_t combined_event_count = ATOMIC_INIT(0);
36 
37 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
38 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
39 
40 static void split_counters(unsigned int *cnt, unsigned int *inpr)
41 {
42 	unsigned int comb = atomic_read(&combined_event_count);
43 
44 	*cnt = (comb >> IN_PROGRESS_BITS);
45 	*inpr = comb & MAX_IN_PROGRESS;
46 }
47 
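/*
 * Illustrative note (editorial, not part of the original file): with a
 * 32-bit int, IN_PROGRESS_BITS is 16, so the low half of
 * combined_event_count holds the number of wakeup events in progress and
 * the high half holds the number of registered events.  For example, a
 * combined value of 0x00050002 splits into cnt == 5 and inpr == 2;
 * wakeup_source_deactivate() later moves one event from "in progress" to
 * "registered" by adding MAX_IN_PROGRESS (0xffff), giving 0x00060001,
 * i.e. cnt == 6 and inpr == 1.
 */
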
48 /* A preserved old value of the events counter. */
49 static unsigned int saved_count;
50 
51 static DEFINE_SPINLOCK(events_lock);
52 
53 static void pm_wakeup_timer_fn(unsigned long data);
54 
55 static LIST_HEAD(wakeup_sources);
56 
57 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
58 
59 /**
60  * wakeup_source_prepare - Prepare a new wakeup source for initialization.
61  * @ws: Wakeup source to prepare.
62  * @name: Pointer to the name of the new wakeup source.
63  *
64  * Callers must ensure that the @name string won't be freed when @ws is still in
65  * use.
66  */
67 void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
68 {
69 	if (ws) {
70 		memset(ws, 0, sizeof(*ws));
71 		ws->name = name;
72 	}
73 }
74 EXPORT_SYMBOL_GPL(wakeup_source_prepare);
75 
76 /**
77  * wakeup_source_create - Create a struct wakeup_source object.
78  * @name: Name of the new wakeup source.
79  */
80 struct wakeup_source *wakeup_source_create(const char *name)
81 {
82 	struct wakeup_source *ws;
83 
84 	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
85 	if (!ws)
86 		return NULL;
87 
88 	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
89 	return ws;
90 }
91 EXPORT_SYMBOL_GPL(wakeup_source_create);
92 
93 /**
94  * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
95  * @ws: Wakeup source to prepare for destruction.
96  *
97  * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
98  * be run in parallel with this function for the same wakeup source object.
99  */
100 void wakeup_source_drop(struct wakeup_source *ws)
101 {
102 	if (!ws)
103 		return;
104 
105 	del_timer_sync(&ws->timer);
106 	__pm_relax(ws);
107 }
108 EXPORT_SYMBOL_GPL(wakeup_source_drop);
109 
110 /**
111  * wakeup_source_destroy - Destroy a struct wakeup_source object.
112  * @ws: Wakeup source to destroy.
113  *
114  * Use only for wakeup source objects created with wakeup_source_create().
115  */
116 void wakeup_source_destroy(struct wakeup_source *ws)
117 {
118 	if (!ws)
119 		return;
120 
121 	wakeup_source_drop(ws);
122 	kfree(ws->name);
123 	kfree(ws);
124 }
125 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
126 
127 /**
128  * wakeup_source_add - Add given object to the list of wakeup sources.
129  * @ws: Wakeup source object to add to the list.
130  */
131 void wakeup_source_add(struct wakeup_source *ws)
132 {
133 	unsigned long flags;
134 
135 	if (WARN_ON(!ws))
136 		return;
137 
138 	spin_lock_init(&ws->lock);
139 	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
140 	ws->active = false;
141 	ws->last_time = ktime_get();
142 
143 	spin_lock_irqsave(&events_lock, flags);
144 	list_add_rcu(&ws->entry, &wakeup_sources);
145 	spin_unlock_irqrestore(&events_lock, flags);
146 }
147 EXPORT_SYMBOL_GPL(wakeup_source_add);
148 
149 /**
150  * wakeup_source_remove - Remove given object from the wakeup sources list.
151  * @ws: Wakeup source object to remove from the list.
152  */
153 void wakeup_source_remove(struct wakeup_source *ws)
154 {
155 	unsigned long flags;
156 
157 	if (WARN_ON(!ws))
158 		return;
159 
160 	spin_lock_irqsave(&events_lock, flags);
161 	list_del_rcu(&ws->entry);
162 	spin_unlock_irqrestore(&events_lock, flags);
163 	synchronize_rcu();
164 }
165 EXPORT_SYMBOL_GPL(wakeup_source_remove);
166 
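/*
 * Illustrative sketch (editorial; the "foo" names are hypothetical): a
 * caller that embeds a wakeup source in its own data, instead of using
 * wakeup_source_register(), combines the low-level helpers above:
 *
 *	static struct wakeup_source foo_ws;
 *
 *	wakeup_source_prepare(&foo_ws, "foo");	(the name must outlive foo_ws)
 *	wakeup_source_add(&foo_ws);
 *	...
 *	wakeup_source_remove(&foo_ws);
 *	wakeup_source_drop(&foo_ws);
 */
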
167 /**
168  * wakeup_source_register - Create wakeup source and add it to the list.
169  * @name: Name of the wakeup source to register.
170  */
171 struct wakeup_source *wakeup_source_register(const char *name)
172 {
173 	struct wakeup_source *ws;
174 
175 	ws = wakeup_source_create(name);
176 	if (ws)
177 		wakeup_source_add(ws);
178 
179 	return ws;
180 }
181 EXPORT_SYMBOL_GPL(wakeup_source_register);
182 
183 /**
184  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
185  * @ws: Wakeup source object to unregister.
186  */
187 void wakeup_source_unregister(struct wakeup_source *ws)
188 {
189 	if (ws) {
190 		wakeup_source_remove(ws);
191 		wakeup_source_destroy(ws);
192 	}
193 }
194 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
195 
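/*
 * Illustrative sketch (editorial; "foo" is hypothetical): the common
 * dynamic lifecycle simply pairs the two wrappers above:
 *
 *	struct wakeup_source *foo_ws = wakeup_source_register("foo_events");
 *
 *	if (!foo_ws)
 *		return -ENOMEM;
 *	...use __pm_stay_awake(foo_ws) / __pm_relax(foo_ws)...
 *	wakeup_source_unregister(foo_ws);
 */
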
196 /**
197  * device_wakeup_attach - Attach a wakeup source object to a device object.
198  * @dev: Device to handle.
199  * @ws: Wakeup source object to attach to @dev.
200  *
201  * This causes @dev to be treated as a wakeup device.
202  */
203 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
204 {
205 	spin_lock_irq(&dev->power.lock);
206 	if (dev->power.wakeup) {
207 		spin_unlock_irq(&dev->power.lock);
208 		return -EEXIST;
209 	}
210 	dev->power.wakeup = ws;
211 	spin_unlock_irq(&dev->power.lock);
212 	return 0;
213 }
214 
215 /**
216  * device_wakeup_enable - Enable given device to be a wakeup source.
217  * @dev: Device to handle.
218  *
219  * Create a wakeup source object, register it and attach it to @dev.
220  */
221 int device_wakeup_enable(struct device *dev)
222 {
223 	struct wakeup_source *ws;
224 	int ret;
225 
226 	if (!dev || !dev->power.can_wakeup)
227 		return -EINVAL;
228 
229 	ws = wakeup_source_register(dev_name(dev));
230 	if (!ws)
231 		return -ENOMEM;
232 
233 	ret = device_wakeup_attach(dev, ws);
234 	if (ret)
235 		wakeup_source_unregister(ws);
236 
237 	return ret;
238 }
239 EXPORT_SYMBOL_GPL(device_wakeup_enable);
240 
241 /**
242  * device_wakeup_detach - Detach a device's wakeup source object from it.
243  * @dev: Device to detach the wakeup source object from.
244  *
245  * After it returns, @dev will not be treated as a wakeup device any more.
246  */
247 static struct wakeup_source *device_wakeup_detach(struct device *dev)
248 {
249 	struct wakeup_source *ws;
250 
251 	spin_lock_irq(&dev->power.lock);
252 	ws = dev->power.wakeup;
253 	dev->power.wakeup = NULL;
254 	spin_unlock_irq(&dev->power.lock);
255 	return ws;
256 }
257 
258 /**
259  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
260  * @dev: Device to handle.
261  *
262  * Detach the @dev's wakeup source object from it, unregister this wakeup source
263  * object and destroy it.
264  */
265 int device_wakeup_disable(struct device *dev)
266 {
267 	struct wakeup_source *ws;
268 
269 	if (!dev || !dev->power.can_wakeup)
270 		return -EINVAL;
271 
272 	ws = device_wakeup_detach(dev);
273 	if (ws)
274 		wakeup_source_unregister(ws);
275 
276 	return 0;
277 }
278 EXPORT_SYMBOL_GPL(device_wakeup_disable);
279 
280 /**
281  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
282  * @dev: Device to handle.
283  * @capable: Whether or not @dev is capable of waking up the system from sleep.
284  *
285  * If @capable is set, set the @dev's power.can_wakeup flag and add its
286  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
287  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
288  *
289  * This function may sleep, so it must not be called from any context where
290  * sleeping is not allowed.
291  */
292 void device_set_wakeup_capable(struct device *dev, bool capable)
293 {
294 	if (!!dev->power.can_wakeup == !!capable)
295 		return;
296 
297 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
298 		if (capable) {
299 			if (wakeup_sysfs_add(dev))
300 				return;
301 		} else {
302 			wakeup_sysfs_remove(dev);
303 		}
304 	}
305 	dev->power.can_wakeup = capable;
306 }
307 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
308 
309 /**
310  * device_init_wakeup - Device wakeup initialization.
311  * @dev: Device to handle.
312  * @enable: Whether or not to enable @dev as a wakeup device.
313  *
314  * By default, most devices should leave wakeup disabled.  The exceptions are
315  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
316  * possibly network interfaces, etc.  Also, devices that don't generate their
317  * own wakeup requests but merely forward requests from one bus to another
318  * (like PCI bridges) should have wakeup enabled by default.
319  */
320 int device_init_wakeup(struct device *dev, bool enable)
321 {
322 	int ret = 0;
323 
324 	if (!dev)
325 		return -EINVAL;
326 
327 	if (enable) {
328 		device_set_wakeup_capable(dev, true);
329 		ret = device_wakeup_enable(dev);
330 	} else {
331 		if (dev->power.can_wakeup)
332 			device_wakeup_disable(dev);
333 
334 		device_set_wakeup_capable(dev, false);
335 	}
336 
337 	return ret;
338 }
339 EXPORT_SYMBOL_GPL(device_init_wakeup);
340 
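/*
 * Illustrative sketch (editorial; "pdev" stands for a hypothetical driver's
 * platform device): a device that should merely be wakeup-capable, with
 * wakeup left disabled by default, would call
 *
 *	device_set_wakeup_capable(&pdev->dev, true);
 *
 * from its probe routine, while a power button or similar device that is
 * expected to wake the system by default would instead call
 *
 *	device_init_wakeup(&pdev->dev, true);
 *
 * which sets the capability flag and registers and attaches a wakeup
 * source in one go.  The matching teardown is
 * device_init_wakeup(&pdev->dev, false).
 */
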
341 /**
342  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
343  * @dev: Device to handle.
344  */
345 int device_set_wakeup_enable(struct device *dev, bool enable)
346 {
347 	if (!dev || !dev->power.can_wakeup)
348 		return -EINVAL;
349 
350 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
351 }
352 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
353 
354 /*
355  * The functions below use the observation that each wakeup event starts a
356  * period in which the system should not be suspended.  The moment this period
357  * will end depends on how the wakeup event is going to be processed after being
358  * detected, and all of the possible cases can be divided into two distinct
359  * groups.
360  *
361  * First, a wakeup event may be detected by the same functional unit that will
362  * carry out the entire processing of it and possibly will pass it to user space
363  * for further processing.  In that case the functional unit that has detected
364  * the event may later "close" the "no suspend" period associated with it
365  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
366  * pm_relax(), balanced with each other, is supposed to be used in such
367  * situations.
368  *
369  * Second, a wakeup event may be detected by one functional unit and processed
370  * by another one.  In that case the unit that has detected it cannot really
371  * "close" the "no suspend" period associated with it, unless it knows in
372  * advance what's going to happen to the event during processing.  This
373  * knowledge, however, may not be available to it, so it can simply specify time
374  * to wait before the system can be suspended and pass it as the second
375  * argument of pm_wakeup_event().
376  *
377  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
378  * "no suspend" period will be ended either by the pm_relax(), or by the timer
379  * function executed when the timer expires, whichever comes first.
380  */
381 
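/*
 * Illustrative sketch of the two patterns described above (editorial;
 * "dev" stands for a hypothetical wakeup-enabled device):
 *
 * 1. Detection and processing in the same functional unit:
 *
 *	pm_stay_awake(dev);		(e.g. in the interrupt handler)
 *	...process the event, possibly hand it to user space...
 *	pm_relax(dev);			(when processing is finished)
 *
 * 2. Detection in one unit, processing in another:
 *
 *	pm_wakeup_event(dev, 200);	(allow roughly 200 ms for processing)
 *
 * In the second case the "no suspend" period ends when the timer set up by
 * pm_wakeup_event() expires, unless pm_relax() is called earlier.
 */
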
382 /**
383  * wakeup_source_activate - Mark given wakeup source as active.
384  * @ws: Wakeup source to handle.
385  *
386  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
387  * core of the event by incrementing the counter of wakeup events being
388  * processed.
389  */
390 static void wakeup_source_activate(struct wakeup_source *ws)
391 {
392 	unsigned int cec;
393 
394 	/*
395 	 * active wakeup source should bring the system
396 	 * out of PM_SUSPEND_FREEZE state
397 	 */
398 	freeze_wake();
399 
400 	ws->active = true;
401 	ws->active_count++;
402 	ws->last_time = ktime_get();
403 	if (ws->autosleep_enabled)
404 		ws->start_prevent_time = ws->last_time;
405 
406 	/* Increment the counter of events in progress. */
407 	cec = atomic_inc_return(&combined_event_count);
408 
409 	trace_wakeup_source_activate(ws->name, cec);
410 }
411 
412 /**
413  * wakeup_source_report_event - Report wakeup event using the given source.
414  * @ws: Wakeup source to report the event for.
415  */
416 static void wakeup_source_report_event(struct wakeup_source *ws)
417 {
418 	ws->event_count++;
419 	/* This is racy, but the counter is approximate anyway. */
420 	if (events_check_enabled)
421 		ws->wakeup_count++;
422 
423 	if (!ws->active)
424 		wakeup_source_activate(ws);
425 }
426 
427 /**
428  * __pm_stay_awake - Notify the PM core of a wakeup event.
429  * @ws: Wakeup source object associated with the source of the event.
430  *
431  * It is safe to call this function from interrupt context.
432  */
433 void __pm_stay_awake(struct wakeup_source *ws)
434 {
435 	unsigned long flags;
436 
437 	if (!ws)
438 		return;
439 
440 	spin_lock_irqsave(&ws->lock, flags);
441 
442 	wakeup_source_report_event(ws);
443 	del_timer(&ws->timer);
444 	ws->timer_expires = 0;
445 
446 	spin_unlock_irqrestore(&ws->lock, flags);
447 }
448 EXPORT_SYMBOL_GPL(__pm_stay_awake);
449 
450 /**
451  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
452  * @dev: Device the wakeup event is related to.
453  *
454  * Notify the PM core of a wakeup event (signaled by @dev) by calling
455  * __pm_stay_awake() for the @dev's wakeup source object.
456  *
457  * Call this function after detecting a wakeup event if pm_relax() is going
458  * to be called directly after processing the event (and possibly passing it to
459  * user space for further processing).
460  */
461 void pm_stay_awake(struct device *dev)
462 {
463 	unsigned long flags;
464 
465 	if (!dev)
466 		return;
467 
468 	spin_lock_irqsave(&dev->power.lock, flags);
469 	__pm_stay_awake(dev->power.wakeup);
470 	spin_unlock_irqrestore(&dev->power.lock, flags);
471 }
472 EXPORT_SYMBOL_GPL(pm_stay_awake);
473 
474 #ifdef CONFIG_PM_AUTOSLEEP
475 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
476 {
477 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
478 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
479 }
480 #else
481 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
482 					     ktime_t now) {}
483 #endif
484 
485 /**
486  * wakeup_source_deactivate - Mark given wakeup source as inactive.
487  * @ws: Wakeup source to handle.
488  *
489  * Update the @ws' statistics and notify the PM core that the wakeup source has
490  * become inactive by decrementing the counter of wakeup events being processed
491  * and incrementing the counter of registered wakeup events.
492  */
493 static void wakeup_source_deactivate(struct wakeup_source *ws)
494 {
495 	unsigned int cnt, inpr, cec;
496 	ktime_t duration;
497 	ktime_t now;
498 
499 	ws->relax_count++;
500 	/*
501 	 * __pm_relax() may be called directly or from a timer function.
502 	 * If it is called directly right after the timer function has been
503 	 * started, but before the timer function calls __pm_relax(), it is
504 	 * possible that __pm_stay_awake() will be called in the meantime and
505 	 * will set ws->active.  Then, ws->active may be cleared immediately
506 	 * by the __pm_relax() called from the timer function, but in such a
507 	 * case ws->relax_count will be different from ws->active_count.
508 	 */
509 	if (ws->relax_count != ws->active_count) {
510 		ws->relax_count--;
511 		return;
512 	}
513 
514 	ws->active = false;
515 
516 	now = ktime_get();
517 	duration = ktime_sub(now, ws->last_time);
518 	ws->total_time = ktime_add(ws->total_time, duration);
519 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
520 		ws->max_time = duration;
521 
522 	ws->last_time = now;
523 	del_timer(&ws->timer);
524 	ws->timer_expires = 0;
525 
526 	if (ws->autosleep_enabled)
527 		update_prevent_sleep_time(ws, now);
528 
529 	/*
530 	 * Increment the counter of registered wakeup events and decrement the
531 	 * counter of wakeup events in progress simultaneously.
532 	 */
533 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
534 	trace_wakeup_source_deactivate(ws->name, cec);
535 
536 	split_counters(&cnt, &inpr);
537 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
538 		wake_up(&wakeup_count_wait_queue);
539 }
540 
541 /**
542  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
543  * @ws: Wakeup source object associated with the source of the event.
544  *
545  * Call this function for wakeup events whose processing started with calling
546  * __pm_stay_awake().
547  *
548  * It is safe to call it from interrupt context.
549  */
550 void __pm_relax(struct wakeup_source *ws)
551 {
552 	unsigned long flags;
553 
554 	if (!ws)
555 		return;
556 
557 	spin_lock_irqsave(&ws->lock, flags);
558 	if (ws->active)
559 		wakeup_source_deactivate(ws);
560 	spin_unlock_irqrestore(&ws->lock, flags);
561 }
562 EXPORT_SYMBOL_GPL(__pm_relax);
563 
564 /**
565  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
566  * @dev: Device that signaled the event.
567  *
568  * Execute __pm_relax() for the @dev's wakeup source object.
569  */
570 void pm_relax(struct device *dev)
571 {
572 	unsigned long flags;
573 
574 	if (!dev)
575 		return;
576 
577 	spin_lock_irqsave(&dev->power.lock, flags);
578 	__pm_relax(dev->power.wakeup);
579 	spin_unlock_irqrestore(&dev->power.lock, flags);
580 }
581 EXPORT_SYMBOL_GPL(pm_relax);
582 
583 /**
584  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
585  * @data: Address of the wakeup source object associated with the event source.
586  *
587  * Call wakeup_source_deactivate() for the wakeup source whose address is stored
588  * in @data if it is currently active, its timer has not been canceled and the
589  * timer's expiration time is not in the future.
590  */
591 static void pm_wakeup_timer_fn(unsigned long data)
592 {
593 	struct wakeup_source *ws = (struct wakeup_source *)data;
594 	unsigned long flags;
595 
596 	spin_lock_irqsave(&ws->lock, flags);
597 
598 	if (ws->active && ws->timer_expires
599 	    && time_after_eq(jiffies, ws->timer_expires)) {
600 		wakeup_source_deactivate(ws);
601 		ws->expire_count++;
602 	}
603 
604 	spin_unlock_irqrestore(&ws->lock, flags);
605 }
606 
607 /**
608  * __pm_wakeup_event - Notify the PM core of a wakeup event.
609  * @ws: Wakeup source object associated with the event source.
610  * @msec: Anticipated event processing time (in milliseconds).
611  *
612  * Notify the PM core of a wakeup event whose source is @ws that will take
613  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
614  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
615  * execute pm_wakeup_timer_fn() in the future.
616  *
617  * It is safe to call this function from interrupt context.
618  */
619 void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
620 {
621 	unsigned long flags;
622 	unsigned long expires;
623 
624 	if (!ws)
625 		return;
626 
627 	spin_lock_irqsave(&ws->lock, flags);
628 
629 	wakeup_source_report_event(ws);
630 
631 	if (!msec) {
632 		wakeup_source_deactivate(ws);
633 		goto unlock;
634 	}
635 
636 	expires = jiffies + msecs_to_jiffies(msec);
637 	if (!expires)
638 		expires = 1;
639 
640 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
641 		mod_timer(&ws->timer, expires);
642 		ws->timer_expires = expires;
643 	}
644 
645  unlock:
646 	spin_unlock_irqrestore(&ws->lock, flags);
647 }
648 EXPORT_SYMBOL_GPL(__pm_wakeup_event);
649 
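/*
 * Editorial note: as the code above shows, passing msec == 0, e.g.
 *
 *	__pm_wakeup_event(ws, 0);
 *
 * reports the event and deactivates @ws immediately, so it only bumps the
 * event counters (which pm_wakeup_pending() can then notice as a newly
 * registered event) without starting a sustained "no suspend" period.
 */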
650 
651 /**
652  * pm_wakeup_event - Notify the PM core of a wakeup event.
653  * @dev: Device the wakeup event is related to.
654  * @msec: Anticipated event processing time (in milliseconds).
655  *
656  * Call __pm_wakeup_event() for the @dev's wakeup source object.
657  */
658 void pm_wakeup_event(struct device *dev, unsigned int msec)
659 {
660 	unsigned long flags;
661 
662 	if (!dev)
663 		return;
664 
665 	spin_lock_irqsave(&dev->power.lock, flags);
666 	__pm_wakeup_event(dev->power.wakeup, msec);
667 	spin_unlock_irqrestore(&dev->power.lock, flags);
668 }
669 EXPORT_SYMBOL_GPL(pm_wakeup_event);
670 
671 void pm_print_active_wakeup_sources(void)
672 {
673 	struct wakeup_source *ws;
674 	int active = 0;
675 	struct wakeup_source *last_activity_ws = NULL;
676 
677 	rcu_read_lock();
678 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
679 		if (ws->active) {
680 			pr_info("active wakeup source: %s\n", ws->name);
681 			active = 1;
682 		} else if (!active &&
683 			   (!last_activity_ws ||
684 			    ktime_to_ns(ws->last_time) >
685 			    ktime_to_ns(last_activity_ws->last_time))) {
686 			last_activity_ws = ws;
687 		}
688 	}
689 
690 	if (!active && last_activity_ws)
691 		pr_info("last active wakeup source: %s\n",
692 			last_activity_ws->name);
693 	rcu_read_unlock();
694 }
695 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
696 
697 /**
698  * pm_wakeup_pending - Check if power transition in progress should be aborted.
699  *
700  * Compare the current number of registered wakeup events with its preserved
701  * value from the past and return true if new wakeup events have been registered
702  * since the old value was stored.  Also return true if the current number of
703  * wakeup events being processed is different from zero.
704  */
705 bool pm_wakeup_pending(void)
706 {
707 	unsigned long flags;
708 	bool ret = false;
709 
710 	spin_lock_irqsave(&events_lock, flags);
711 	if (events_check_enabled) {
712 		unsigned int cnt, inpr;
713 
714 		split_counters(&cnt, &inpr);
715 		ret = (cnt != saved_count || inpr > 0);
716 		events_check_enabled = !ret;
717 	}
718 	spin_unlock_irqrestore(&events_lock, flags);
719 
720 	if (ret) {
721 		pr_info("PM: Wakeup pending, aborting suspend\n");
722 		pm_print_active_wakeup_sources();
723 	}
724 
725 	return ret || pm_abort_suspend;
726 }
727 
728 void pm_system_wakeup(void)
729 {
730 	pm_abort_suspend = true;
731 	freeze_wake();
732 }
733 EXPORT_SYMBOL_GPL(pm_system_wakeup);
734 
735 void pm_wakeup_clear(void)
736 {
737 	pm_abort_suspend = false;
738 }
739 
740 /**
741  * pm_get_wakeup_count - Read the number of registered wakeup events.
742  * @count: Address to store the value at.
743  * @block: Whether or not to block.
744  *
745  * Store the number of registered wakeup events at the address in @count.  If
746  * @block is set, block until the current number of wakeup events being
747  * processed is zero.
748  *
749  * Return 'false' if the current number of wakeup events being processed is
750  * nonzero.  Otherwise return 'true'.
751  */
752 bool pm_get_wakeup_count(unsigned int *count, bool block)
753 {
754 	unsigned int cnt, inpr;
755 
756 	if (block) {
757 		DEFINE_WAIT(wait);
758 
759 		for (;;) {
760 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
761 					TASK_INTERRUPTIBLE);
762 			split_counters(&cnt, &inpr);
763 			if (inpr == 0 || signal_pending(current))
764 				break;
765 
766 			schedule();
767 		}
768 		finish_wait(&wakeup_count_wait_queue, &wait);
769 	}
770 
771 	split_counters(&cnt, &inpr);
772 	*count = cnt;
773 	return !inpr;
774 }
775 
776 /**
777  * pm_save_wakeup_count - Save the current number of registered wakeup events.
778  * @count: Value to compare with the current number of registered wakeup events.
779  *
780  * If @count is equal to the current number of registered wakeup events and the
781  * current number of wakeup events being processed is zero, store @count as the
782  * old number of registered wakeup events for pm_wakeup_pending(), enable
783  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
784  * detection and return 'false'.
785  */
786 bool pm_save_wakeup_count(unsigned int count)
787 {
788 	unsigned int cnt, inpr;
789 	unsigned long flags;
790 
791 	events_check_enabled = false;
792 	spin_lock_irqsave(&events_lock, flags);
793 	split_counters(&cnt, &inpr);
794 	if (cnt == count && inpr == 0) {
795 		saved_count = count;
796 		events_check_enabled = true;
797 	}
798 	spin_unlock_irqrestore(&events_lock, flags);
799 	return events_check_enabled;
800 }
801 
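/*
 * Illustrative sketch (editorial): pm_get_wakeup_count() and
 * pm_save_wakeup_count() back the /sys/power/wakeup_count interface.  A
 * user-space suspend helper typically reads the count, writes the same
 * value back and only then initiates suspend; if the write fails, wakeup
 * events were registered in the meantime and the attempt should be
 * retried.  Roughly:
 *
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *	char buf[32];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);	(may block)
 *
 *	if (n > 0 && write(fd, buf, n) == n)
 *		...write "mem" to /sys/power/state...
 *	else
 *		...a wakeup event arrived, retry later...
 */
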
802 #ifdef CONFIG_PM_AUTOSLEEP
803 /**
804  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
805  * @set: Whether to set or to clear the autosleep_enabled flags.
806  */
807 void pm_wakep_autosleep_enabled(bool set)
808 {
809 	struct wakeup_source *ws;
810 	ktime_t now = ktime_get();
811 
812 	rcu_read_lock();
813 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
814 		spin_lock_irq(&ws->lock);
815 		if (ws->autosleep_enabled != set) {
816 			ws->autosleep_enabled = set;
817 			if (ws->active) {
818 				if (set)
819 					ws->start_prevent_time = now;
820 				else
821 					update_prevent_sleep_time(ws, now);
822 			}
823 		}
824 		spin_unlock_irq(&ws->lock);
825 	}
826 	rcu_read_unlock();
827 }
828 #endif /* CONFIG_PM_AUTOSLEEP */
829 
830 static struct dentry *wakeup_sources_stats_dentry;
831 
832 /**
833  * print_wakeup_source_stats - Print wakeup source statistics information.
834  * @m: seq_file to print the statistics into.
835  * @ws: Wakeup source object to print the statistics for.
836  */
837 static int print_wakeup_source_stats(struct seq_file *m,
838 				     struct wakeup_source *ws)
839 {
840 	unsigned long flags;
841 	ktime_t total_time;
842 	ktime_t max_time;
843 	unsigned long active_count;
844 	ktime_t active_time;
845 	ktime_t prevent_sleep_time;
846 
847 	spin_lock_irqsave(&ws->lock, flags);
848 
849 	total_time = ws->total_time;
850 	max_time = ws->max_time;
851 	prevent_sleep_time = ws->prevent_sleep_time;
852 	active_count = ws->active_count;
853 	if (ws->active) {
854 		ktime_t now = ktime_get();
855 
856 		active_time = ktime_sub(now, ws->last_time);
857 		total_time = ktime_add(total_time, active_time);
858 		if (active_time.tv64 > max_time.tv64)
859 			max_time = active_time;
860 
861 		if (ws->autosleep_enabled)
862 			prevent_sleep_time = ktime_add(prevent_sleep_time,
863 				ktime_sub(now, ws->start_prevent_time));
864 	} else {
865 		active_time = ktime_set(0, 0);
866 	}
867 
868 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
869 		   ws->name, active_count, ws->event_count,
870 		   ws->wakeup_count, ws->expire_count,
871 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
872 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
873 		   ktime_to_ms(prevent_sleep_time));
874 
875 	spin_unlock_irqrestore(&ws->lock, flags);
876 
877 	return 0;
878 }
879 
880 /**
881  * wakeup_sources_stats_show - Print wakeup sources statistics information.
882  * @m: seq_file to print the statistics into.
883  */
884 static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
885 {
886 	struct wakeup_source *ws;
887 
888 	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
889 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
890 		"last_change\tprevent_suspend_time\n");
891 
892 	rcu_read_lock();
893 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
894 		print_wakeup_source_stats(m, ws);
895 	rcu_read_unlock();
896 
897 	return 0;
898 }
899 
900 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
901 {
902 	return single_open(file, wakeup_sources_stats_show, NULL);
903 }
904 
905 static const struct file_operations wakeup_sources_stats_fops = {
906 	.owner = THIS_MODULE,
907 	.open = wakeup_sources_stats_open,
908 	.read = seq_read,
909 	.llseek = seq_lseek,
910 	.release = single_release,
911 };
912 
913 static int __init wakeup_sources_debugfs_init(void)
914 {
915 	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
916 			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
917 	return 0;
918 }
919 
920 postcore_initcall(wakeup_sources_debugfs_init);
921