xref: /openbmc/linux/drivers/base/power/wakeup.c (revision 861e10be)
1 /*
2  * drivers/base/power/wakeup.c - System wakeup events framework
3  *
4  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <trace/events/power.h>
18 
19 #include "power.h"
20 
21 /*
22  * If set, the suspend/hibernate code will abort transitions to a sleep state
23  * if wakeup events are registered during or immediately before the transition.
24  */
25 bool events_check_enabled __read_mostly;
26 
27 /*
28  * Combined counters of registered wakeup events and wakeup events in progress.
29  * They need to be modified together atomically, so it's better to use one
30  * atomic variable to hold them both.
31  */
32 static atomic_t combined_event_count = ATOMIC_INIT(0);
33 
34 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
35 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
36 
37 static void split_counters(unsigned int *cnt, unsigned int *inpr)
38 {
39 	unsigned int comb = atomic_read(&combined_event_count);
40 
41 	*cnt = (comb >> IN_PROGRESS_BITS);
42 	*inpr = comb & MAX_IN_PROGRESS;
43 }
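
/*
 * A worked illustration of the packing used above (assuming a 32-bit int,
 * so IN_PROGRESS_BITS is 16 and MAX_IN_PROGRESS is 0xffff); this is a
 * comment sketch only, not additional code:
 *
 *	comb = (cnt << 16) | inpr;
 *
 *	atomic_inc_return(&combined_event_count);
 *		// adds 1:           inpr++          (event processing started)
 *	atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
 *		// adds 0x10000 - 1: cnt++, inpr--   (event registered, done)
 *
 * These are the operations performed by wakeup_source_activate() and
 * wakeup_source_deactivate() below.
 */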
44 
45 /* A preserved old value of the events counter. */
46 static unsigned int saved_count;
47 
48 static DEFINE_SPINLOCK(events_lock);
49 
50 static void pm_wakeup_timer_fn(unsigned long data);
51 
52 static LIST_HEAD(wakeup_sources);
53 
54 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
55 
56 /**
57  * wakeup_source_prepare - Prepare a new wakeup source for initialization.
58  * @ws: Wakeup source to prepare.
59  * @name: Pointer to the name of the new wakeup source.
60  *
61  * Callers must ensure that the @name string won't be freed when @ws is still in
62  * use.
63  */
64 void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
65 {
66 	if (ws) {
67 		memset(ws, 0, sizeof(*ws));
68 		ws->name = name;
69 	}
70 }
71 EXPORT_SYMBOL_GPL(wakeup_source_prepare);
72 
73 /**
74  * wakeup_source_create - Create a struct wakeup_source object.
75  * @name: Name of the new wakeup source.
76  */
77 struct wakeup_source *wakeup_source_create(const char *name)
78 {
79 	struct wakeup_source *ws;
80 
81 	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
82 	if (!ws)
83 		return NULL;
84 
85 	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
86 	return ws;
87 }
88 EXPORT_SYMBOL_GPL(wakeup_source_create);
89 
90 /**
91  * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
92  * @ws: Wakeup source to prepare for destruction.
93  *
94  * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
95  * be run in parallel with this function for the same wakeup source object.
96  */
97 void wakeup_source_drop(struct wakeup_source *ws)
98 {
99 	if (!ws)
100 		return;
101 
102 	del_timer_sync(&ws->timer);
103 	__pm_relax(ws);
104 }
105 EXPORT_SYMBOL_GPL(wakeup_source_drop);
106 
107 /**
108  * wakeup_source_destroy - Destroy a struct wakeup_source object.
109  * @ws: Wakeup source to destroy.
110  *
111  * Use only for wakeup source objects created with wakeup_source_create().
112  */
113 void wakeup_source_destroy(struct wakeup_source *ws)
114 {
115 	if (!ws)
116 		return;
117 
118 	wakeup_source_drop(ws);
119 	kfree(ws->name);
120 	kfree(ws);
121 }
122 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
123 
124 /**
125  * wakeup_source_add - Add given object to the list of wakeup sources.
126  * @ws: Wakeup source object to add to the list.
127  */
128 void wakeup_source_add(struct wakeup_source *ws)
129 {
130 	unsigned long flags;
131 
132 	if (WARN_ON(!ws))
133 		return;
134 
135 	spin_lock_init(&ws->lock);
136 	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
137 	ws->active = false;
138 	ws->last_time = ktime_get();
139 
140 	spin_lock_irqsave(&events_lock, flags);
141 	list_add_rcu(&ws->entry, &wakeup_sources);
142 	spin_unlock_irqrestore(&events_lock, flags);
143 }
144 EXPORT_SYMBOL_GPL(wakeup_source_add);
145 
146 /**
147  * wakeup_source_remove - Remove given object from the wakeup sources list.
148  * @ws: Wakeup source object to remove from the list.
149  */
150 void wakeup_source_remove(struct wakeup_source *ws)
151 {
152 	unsigned long flags;
153 
154 	if (WARN_ON(!ws))
155 		return;
156 
157 	spin_lock_irqsave(&events_lock, flags);
158 	list_del_rcu(&ws->entry);
159 	spin_unlock_irqrestore(&events_lock, flags);
160 	synchronize_rcu();
161 }
162 EXPORT_SYMBOL_GPL(wakeup_source_remove);
163 
164 /**
165  * wakeup_source_register - Create wakeup source and add it to the list.
166  * @name: Name of the wakeup source to register.
167  */
168 struct wakeup_source *wakeup_source_register(const char *name)
169 {
170 	struct wakeup_source *ws;
171 
172 	ws = wakeup_source_create(name);
173 	if (ws)
174 		wakeup_source_add(ws);
175 
176 	return ws;
177 }
178 EXPORT_SYMBOL_GPL(wakeup_source_register);
179 
180 /**
181  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
182  * @ws: Wakeup source object to unregister.
183  */
184 void wakeup_source_unregister(struct wakeup_source *ws)
185 {
186 	if (ws) {
187 		wakeup_source_remove(ws);
188 		wakeup_source_destroy(ws);
189 	}
190 }
191 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
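
/*
 * Illustrative sketch (not part of this file): a stand-alone wakeup source
 * that is not tied to any struct device can be used like this, where
 * "foo_events" and the surrounding code are hypothetical:
 *
 *	struct wakeup_source *ws = wakeup_source_register("foo_events");
 *
 *	if (ws) {
 *		__pm_stay_awake(ws);		// block system suspend
 *		...				// handle the wakeup event
 *		__pm_relax(ws);			// allow system suspend again
 *	}
 *	...
 *	wakeup_source_unregister(ws);		// remove from the list and destroy
 */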
192 
193 /**
194  * device_wakeup_attach - Attach a wakeup source object to a device object.
195  * @dev: Device to handle.
196  * @ws: Wakeup source object to attach to @dev.
197  *
198  * This causes @dev to be treated as a wakeup device.
199  */
200 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
201 {
202 	spin_lock_irq(&dev->power.lock);
203 	if (dev->power.wakeup) {
204 		spin_unlock_irq(&dev->power.lock);
205 		return -EEXIST;
206 	}
207 	dev->power.wakeup = ws;
208 	spin_unlock_irq(&dev->power.lock);
209 	return 0;
210 }
211 
212 /**
213  * device_wakeup_enable - Enable given device to be a wakeup source.
214  * @dev: Device to handle.
215  *
216  * Create a wakeup source object, register it and attach it to @dev.
217  */
218 int device_wakeup_enable(struct device *dev)
219 {
220 	struct wakeup_source *ws;
221 	int ret;
222 
223 	if (!dev || !dev->power.can_wakeup)
224 		return -EINVAL;
225 
226 	ws = wakeup_source_register(dev_name(dev));
227 	if (!ws)
228 		return -ENOMEM;
229 
230 	ret = device_wakeup_attach(dev, ws);
231 	if (ret)
232 		wakeup_source_unregister(ws);
233 
234 	return ret;
235 }
236 EXPORT_SYMBOL_GPL(device_wakeup_enable);
237 
238 /**
239  * device_wakeup_detach - Detach a device's wakeup source object from it.
240  * @dev: Device to detach the wakeup source object from.
241  *
242  * After it returns, @dev will not be treated as a wakeup device any more.
243  */
244 static struct wakeup_source *device_wakeup_detach(struct device *dev)
245 {
246 	struct wakeup_source *ws;
247 
248 	spin_lock_irq(&dev->power.lock);
249 	ws = dev->power.wakeup;
250 	dev->power.wakeup = NULL;
251 	spin_unlock_irq(&dev->power.lock);
252 	return ws;
253 }
254 
255 /**
256  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
257  * @dev: Device to handle.
258  *
259  * Detach the @dev's wakeup source object from it, unregister this wakeup source
260  * object and destroy it.
261  */
262 int device_wakeup_disable(struct device *dev)
263 {
264 	struct wakeup_source *ws;
265 
266 	if (!dev || !dev->power.can_wakeup)
267 		return -EINVAL;
268 
269 	ws = device_wakeup_detach(dev);
270 	if (ws)
271 		wakeup_source_unregister(ws);
272 
273 	return 0;
274 }
275 EXPORT_SYMBOL_GPL(device_wakeup_disable);
276 
277 /**
278  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
279  * @dev: Device to handle.
280  * @capable: Whether or not @dev is capable of waking up the system from sleep.
281  *
282  * If @capable is set, set the @dev's power.can_wakeup flag and add its
283  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
284  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
285  *
286  * This function may sleep and must not be called from any context where
287  * sleeping is not allowed.
288  */
289 void device_set_wakeup_capable(struct device *dev, bool capable)
290 {
291 	if (!!dev->power.can_wakeup == !!capable)
292 		return;
293 
294 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
295 		if (capable) {
296 			if (wakeup_sysfs_add(dev))
297 				return;
298 		} else {
299 			wakeup_sysfs_remove(dev);
300 		}
301 	}
302 	dev->power.can_wakeup = capable;
303 }
304 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
305 
306 /**
307  * device_init_wakeup - Device wakeup initialization.
308  * @dev: Device to handle.
309  * @enable: Whether or not to enable @dev as a wakeup device.
310  *
311  * By default, most devices should leave wakeup disabled.  The exceptions are
312  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
313  * possibly network interfaces, etc.  Also, devices that don't generate their
314  * own wakeup requests but merely forward requests from one bus to another
315  * (like PCI bridges) should have wakeup enabled by default.
316  */
317 int device_init_wakeup(struct device *dev, bool enable)
318 {
319 	int ret = 0;
320 
321 	if (enable) {
322 		device_set_wakeup_capable(dev, true);
323 		ret = device_wakeup_enable(dev);
324 	} else {
325 		device_set_wakeup_capable(dev, false);
326 	}
327 
328 	return ret;
329 }
330 EXPORT_SYMBOL_GPL(device_init_wakeup);
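
/*
 * Hypothetical usage sketch (not from a real driver): a driver whose device
 * is expected to wake the system typically enables this in probe and undoes
 * it in remove:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return device_init_wakeup(&pdev->dev, true);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		device_init_wakeup(&pdev->dev, false);
 *		...
 *		return 0;
 *	}
 */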
331 
332 /**
333  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
334  * @dev: Device to handle.
335  */
336 int device_set_wakeup_enable(struct device *dev, bool enable)
337 {
338 	if (!dev || !dev->power.can_wakeup)
339 		return -EINVAL;
340 
341 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
342 }
343 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
344 
345 /*
346  * The functions below use the observation that each wakeup event starts a
347  * period in which the system should not be suspended.  The moment this period
348  * will end depends on how the wakeup event is going to be processed after being
349  * detected and all of the possible cases can be divided into two distinct
350  * groups.
351  *
352  * First, a wakeup event may be detected by the same functional unit that will
353  * carry out the entire processing of it and possibly will pass it to user space
354  * for further processing.  In that case the functional unit that has detected
355  * the event may later "close" the "no suspend" period associated with it
356  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
357  * pm_relax(), balanced with each other, is supposed to be used in such
358  * situations.
359  *
360  * Second, a wakeup event may be detected by one functional unit and processed
361  * by another one.  In that case the unit that has detected it cannot really
362  * "close" the "no suspend" period associated with it, unless it knows in
363  * advance what's going to happen to the event during processing.  This
364  * knowledge, however, may not be available to it, so it can simply specify time
365  * to wait before the system can be suspended and pass it as the second
366  * argument of pm_wakeup_event().
367  *
368  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
369  * "no suspend" period will be ended either by the pm_relax(), or by the timer
370  * function executed when the timer expires, whichever comes first.
371  */
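
/*
 * Illustrative sketch of the two cases described above (hypothetical driver
 * code, not part of this file; the foo_* and bar_* names are made up):
 *
 *	// Case 1: the unit that detects the event also finishes handling it.
 *	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		pm_stay_awake(foo->dev);	// open the "no suspend" period
 *		foo_handle_event(foo);		// process the event completely
 *		pm_relax(foo->dev);		// close the period when done
 *		return IRQ_HANDLED;
 *	}
 *
 *	// Case 2: the event is handed off to another unit (or user space),
 *	// so only an estimated processing time can be given.
 *	static irqreturn_t bar_irq_handler(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_queue_event(foo);
 *		pm_wakeup_event(foo->dev, 200);	// hold off suspend for ~200 ms
 *		return IRQ_HANDLED;
 *	}
 */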
372 
373 /**
374  * wakeup_source_activate - Mark given wakeup source as active.
375  * @ws: Wakeup source to handle.
376  *
377  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
378  * core of the event by incrementing the counter of wakeup events being
379  * processed.
380  */
381 static void wakeup_source_activate(struct wakeup_source *ws)
382 {
383 	unsigned int cec;
384 
385 	ws->active = true;
386 	ws->active_count++;
387 	ws->last_time = ktime_get();
388 	if (ws->autosleep_enabled)
389 		ws->start_prevent_time = ws->last_time;
390 
391 	/* Increment the counter of events in progress. */
392 	cec = atomic_inc_return(&combined_event_count);
393 
394 	trace_wakeup_source_activate(ws->name, cec);
395 }
396 
397 /**
398  * wakeup_source_report_event - Report wakeup event using the given source.
399  * @ws: Wakeup source to report the event for.
400  */
401 static void wakeup_source_report_event(struct wakeup_source *ws)
402 {
403 	ws->event_count++;
404 	/* This is racy, but the counter is approximate anyway. */
405 	if (events_check_enabled)
406 		ws->wakeup_count++;
407 
408 	if (!ws->active)
409 		wakeup_source_activate(ws);
410 }
411 
412 /**
413  * __pm_stay_awake - Notify the PM core of a wakeup event.
414  * @ws: Wakeup source object associated with the source of the event.
415  *
416  * It is safe to call this function from interrupt context.
417  */
418 void __pm_stay_awake(struct wakeup_source *ws)
419 {
420 	unsigned long flags;
421 
422 	if (!ws)
423 		return;
424 
425 	spin_lock_irqsave(&ws->lock, flags);
426 
427 	wakeup_source_report_event(ws);
428 	del_timer(&ws->timer);
429 	ws->timer_expires = 0;
430 
431 	spin_unlock_irqrestore(&ws->lock, flags);
432 }
433 EXPORT_SYMBOL_GPL(__pm_stay_awake);
434 
435 /**
436  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
437  * @dev: Device the wakeup event is related to.
438  *
439  * Notify the PM core of a wakeup event (signaled by @dev) by calling
440  * __pm_stay_awake() for the @dev's wakeup source object.
441  *
442  * Call this function after detecting a wakeup event if pm_relax() is going
443  * to be called directly after processing the event (and possibly passing it to
444  * user space for further processing).
445  */
446 void pm_stay_awake(struct device *dev)
447 {
448 	unsigned long flags;
449 
450 	if (!dev)
451 		return;
452 
453 	spin_lock_irqsave(&dev->power.lock, flags);
454 	__pm_stay_awake(dev->power.wakeup);
455 	spin_unlock_irqrestore(&dev->power.lock, flags);
456 }
457 EXPORT_SYMBOL_GPL(pm_stay_awake);
458 
459 #ifdef CONFIG_PM_AUTOSLEEP
460 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
461 {
462 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
463 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
464 }
465 #else
466 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
467 					     ktime_t now) {}
468 #endif
469 
470 /**
471  * wakeup_source_deactivate - Mark given wakeup source as inactive.
472  * @ws: Wakeup source to handle.
473  *
474  * Update the @ws' statistics and notify the PM core that the wakeup source has
475  * become inactive by decrementing the counter of wakeup events being processed
476  * and incrementing the counter of registered wakeup events.
477  */
478 static void wakeup_source_deactivate(struct wakeup_source *ws)
479 {
480 	unsigned int cnt, inpr, cec;
481 	ktime_t duration;
482 	ktime_t now;
483 
484 	ws->relax_count++;
485 	/*
486 	 * __pm_relax() may be called directly or from a timer function.
487 	 * If it is called directly right after the timer function has been
488 	 * started, but before the timer function calls __pm_relax(), it is
489 	 * possible that __pm_stay_awake() will be called in the meantime and
490 	 * will set ws->active.  Then, ws->active may be cleared immediately
491 	 * by the __pm_relax() called from the timer function, but in such a
492 	 * case ws->relax_count will be different from ws->active_count.
493 	 */
494 	if (ws->relax_count != ws->active_count) {
495 		ws->relax_count--;
496 		return;
497 	}
498 
499 	ws->active = false;
500 
501 	now = ktime_get();
502 	duration = ktime_sub(now, ws->last_time);
503 	ws->total_time = ktime_add(ws->total_time, duration);
504 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
505 		ws->max_time = duration;
506 
507 	ws->last_time = now;
508 	del_timer(&ws->timer);
509 	ws->timer_expires = 0;
510 
511 	if (ws->autosleep_enabled)
512 		update_prevent_sleep_time(ws, now);
513 
514 	/*
515 	 * Increment the counter of registered wakeup events and decrement the
516 	 * counter of wakeup events in progress simultaneously.
517 	 */
518 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
519 	trace_wakeup_source_deactivate(ws->name, cec);
520 
521 	split_counters(&cnt, &inpr);
522 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
523 		wake_up(&wakeup_count_wait_queue);
524 }
525 
526 /**
527  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
528  * @ws: Wakeup source object associated with the source of the event.
529  *
530  * Call this function for wakeup events whose processing started with calling
531  * __pm_stay_awake().
532  *
533  * It is safe to call it from interrupt context.
534  */
535 void __pm_relax(struct wakeup_source *ws)
536 {
537 	unsigned long flags;
538 
539 	if (!ws)
540 		return;
541 
542 	spin_lock_irqsave(&ws->lock, flags);
543 	if (ws->active)
544 		wakeup_source_deactivate(ws);
545 	spin_unlock_irqrestore(&ws->lock, flags);
546 }
547 EXPORT_SYMBOL_GPL(__pm_relax);
548 
549 /**
550  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
551  * @dev: Device that signaled the event.
552  *
553  * Execute __pm_relax() for the @dev's wakeup source object.
554  */
555 void pm_relax(struct device *dev)
556 {
557 	unsigned long flags;
558 
559 	if (!dev)
560 		return;
561 
562 	spin_lock_irqsave(&dev->power.lock, flags);
563 	__pm_relax(dev->power.wakeup);
564 	spin_unlock_irqrestore(&dev->power.lock, flags);
565 }
566 EXPORT_SYMBOL_GPL(pm_relax);
567 
568 /**
569  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
570  * @data: Address of the wakeup source object associated with the event source.
571  *
572  * Call wakeup_source_deactivate() for the wakeup source whose address is stored
573  * in @data if it is currently active and its timer has not been canceled and
574  * the expiration time of the timer is not in future.
575  */
576 static void pm_wakeup_timer_fn(unsigned long data)
577 {
578 	struct wakeup_source *ws = (struct wakeup_source *)data;
579 	unsigned long flags;
580 
581 	spin_lock_irqsave(&ws->lock, flags);
582 
583 	if (ws->active && ws->timer_expires
584 	    && time_after_eq(jiffies, ws->timer_expires)) {
585 		wakeup_source_deactivate(ws);
586 		ws->expire_count++;
587 	}
588 
589 	spin_unlock_irqrestore(&ws->lock, flags);
590 }
591 
592 /**
593  * __pm_wakeup_event - Notify the PM core of a wakeup event.
594  * @ws: Wakeup source object associated with the event source.
595  * @msec: Anticipated event processing time (in milliseconds).
596  *
597  * Notify the PM core of a wakeup event whose source is @ws that will take
598  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
599  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
600  * execute pm_wakeup_timer_fn() in the future.
601  *
602  * It is safe to call this function from interrupt context.
603  */
604 void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
605 {
606 	unsigned long flags;
607 	unsigned long expires;
608 
609 	if (!ws)
610 		return;
611 
612 	spin_lock_irqsave(&ws->lock, flags);
613 
614 	wakeup_source_report_event(ws);
615 
616 	if (!msec) {
617 		wakeup_source_deactivate(ws);
618 		goto unlock;
619 	}
620 
621 	expires = jiffies + msecs_to_jiffies(msec);
622 	if (!expires)
623 		expires = 1;
624 
625 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
626 		mod_timer(&ws->timer, expires);
627 		ws->timer_expires = expires;
628 	}
629 
630  unlock:
631 	spin_unlock_irqrestore(&ws->lock, flags);
632 }
633 EXPORT_SYMBOL_GPL(__pm_wakeup_event);
634 
635 
636 /**
637  * pm_wakeup_event - Notify the PM core of a wakeup event.
638  * @dev: Device the wakeup event is related to.
639  * @msec: Anticipated event processing time (in milliseconds).
640  *
641  * Call __pm_wakeup_event() for the @dev's wakeup source object.
642  */
643 void pm_wakeup_event(struct device *dev, unsigned int msec)
644 {
645 	unsigned long flags;
646 
647 	if (!dev)
648 		return;
649 
650 	spin_lock_irqsave(&dev->power.lock, flags);
651 	__pm_wakeup_event(dev->power.wakeup, msec);
652 	spin_unlock_irqrestore(&dev->power.lock, flags);
653 }
654 EXPORT_SYMBOL_GPL(pm_wakeup_event);
655 
656 static void print_active_wakeup_sources(void)
657 {
658 	struct wakeup_source *ws;
659 	int active = 0;
660 	struct wakeup_source *last_activity_ws = NULL;
661 
662 	rcu_read_lock();
663 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
664 		if (ws->active) {
665 			pr_info("active wakeup source: %s\n", ws->name);
666 			active = 1;
667 		} else if (!active &&
668 			   (!last_activity_ws ||
669 			    ktime_to_ns(ws->last_time) >
670 			    ktime_to_ns(last_activity_ws->last_time))) {
671 			last_activity_ws = ws;
672 		}
673 	}
674 
675 	if (!active && last_activity_ws)
676 		pr_info("last active wakeup source: %s\n",
677 			last_activity_ws->name);
678 	rcu_read_unlock();
679 }
680 
681 /**
682  * pm_wakeup_pending - Check if power transition in progress should be aborted.
683  *
684  * Compare the current number of registered wakeup events with its preserved
685  * value from the past and return true if new wakeup events have been registered
686  * since the old value was stored.  Also return true if the current number of
687  * wakeup events being processed is different from zero.
688  */
689 bool pm_wakeup_pending(void)
690 {
691 	unsigned long flags;
692 	bool ret = false;
693 
694 	spin_lock_irqsave(&events_lock, flags);
695 	if (events_check_enabled) {
696 		unsigned int cnt, inpr;
697 
698 		split_counters(&cnt, &inpr);
699 		ret = (cnt != saved_count || inpr > 0);
700 		events_check_enabled = !ret;
701 	}
702 	spin_unlock_irqrestore(&events_lock, flags);
703 
704 	if (ret)
705 		print_active_wakeup_sources();
706 
707 	return ret;
708 }
709 
710 /**
711  * pm_get_wakeup_count - Read the number of registered wakeup events.
712  * @count: Address to store the value at.
713  * @block: Whether or not to block.
714  *
715  * Store the number of registered wakeup events at the address in @count.  If
716  * @block is set, block until the current number of wakeup events being
717  * processed is zero.
718  *
719  * Return 'false' if the current number of wakeup events being processed is
720  * nonzero.  Otherwise return 'true'.
721  */
722 bool pm_get_wakeup_count(unsigned int *count, bool block)
723 {
724 	unsigned int cnt, inpr;
725 
726 	if (block) {
727 		DEFINE_WAIT(wait);
728 
729 		for (;;) {
730 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
731 					TASK_INTERRUPTIBLE);
732 			split_counters(&cnt, &inpr);
733 			if (inpr == 0 || signal_pending(current))
734 				break;
735 
736 			schedule();
737 		}
738 		finish_wait(&wakeup_count_wait_queue, &wait);
739 	}
740 
741 	split_counters(&cnt, &inpr);
742 	*count = cnt;
743 	return !inpr;
744 }
745 
746 /**
747  * pm_save_wakeup_count - Save the current number of registered wakeup events.
748  * @count: Value to compare with the current number of registered wakeup events.
749  *
750  * If @count is equal to the current number of registered wakeup events and the
751  * current number of wakeup events being processed is zero, store @count as the
752  * old number of registered wakeup events for pm_wakeup_pending(), enable
753  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
754  * detection and return 'false'.
755  */
756 bool pm_save_wakeup_count(unsigned int count)
757 {
758 	unsigned int cnt, inpr;
759 	unsigned long flags;
760 
761 	events_check_enabled = false;
762 	spin_lock_irqsave(&events_lock, flags);
763 	split_counters(&cnt, &inpr);
764 	if (cnt == count && inpr == 0) {
765 		saved_count = count;
766 		events_check_enabled = true;
767 	}
768 	spin_unlock_irqrestore(&events_lock, flags);
769 	return events_check_enabled;
770 }
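
/*
 * pm_get_wakeup_count() and pm_save_wakeup_count() back the
 * /sys/power/wakeup_count interface.  A race-free user-space suspend
 * sequence built on it might look roughly like this (illustrative
 * user-space C sketch, error handling omitted):
 *
 *	char buf[32];
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *
 *	ssize_t len = read(fd, buf, sizeof(buf) - 1);	// may block while
 *	buf[len] = '\0';				// events are in progress
 *	// ... decide whether suspending is still appropriate ...
 *	if (write(fd, buf, len) > 0) {		// fails if new events arrived
 *		int sfd = open("/sys/power/state", O_WRONLY);
 *		write(sfd, "mem", 3);
 *		close(sfd);
 *	}
 *	close(fd);
 */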
771 
772 #ifdef CONFIG_PM_AUTOSLEEP
773 /**
774  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
775  * @set: Whether to set or to clear the autosleep_enabled flags.
776  */
777 void pm_wakep_autosleep_enabled(bool set)
778 {
779 	struct wakeup_source *ws;
780 	ktime_t now = ktime_get();
781 
782 	rcu_read_lock();
783 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
784 		spin_lock_irq(&ws->lock);
785 		if (ws->autosleep_enabled != set) {
786 			ws->autosleep_enabled = set;
787 			if (ws->active) {
788 				if (set)
789 					ws->start_prevent_time = now;
790 				else
791 					update_prevent_sleep_time(ws, now);
792 			}
793 		}
794 		spin_unlock_irq(&ws->lock);
795 	}
796 	rcu_read_unlock();
797 }
798 #endif /* CONFIG_PM_AUTOSLEEP */
799 
800 static struct dentry *wakeup_sources_stats_dentry;
801 
802 /**
803  * print_wakeup_source_stats - Print wakeup source statistics information.
804  * @m: seq_file to print the statistics into.
805  * @ws: Wakeup source object to print the statistics for.
806  */
807 static int print_wakeup_source_stats(struct seq_file *m,
808 				     struct wakeup_source *ws)
809 {
810 	unsigned long flags;
811 	ktime_t total_time;
812 	ktime_t max_time;
813 	unsigned long active_count;
814 	ktime_t active_time;
815 	ktime_t prevent_sleep_time;
816 	int ret;
817 
818 	spin_lock_irqsave(&ws->lock, flags);
819 
820 	total_time = ws->total_time;
821 	max_time = ws->max_time;
822 	prevent_sleep_time = ws->prevent_sleep_time;
823 	active_count = ws->active_count;
824 	if (ws->active) {
825 		ktime_t now = ktime_get();
826 
827 		active_time = ktime_sub(now, ws->last_time);
828 		total_time = ktime_add(total_time, active_time);
829 		if (active_time.tv64 > max_time.tv64)
830 			max_time = active_time;
831 
832 		if (ws->autosleep_enabled)
833 			prevent_sleep_time = ktime_add(prevent_sleep_time,
834 				ktime_sub(now, ws->start_prevent_time));
835 	} else {
836 		active_time = ktime_set(0, 0);
837 	}
838 
839 	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
840 			"%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
841 			ws->name, active_count, ws->event_count,
842 			ws->wakeup_count, ws->expire_count,
843 			ktime_to_ms(active_time), ktime_to_ms(total_time),
844 			ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
845 			ktime_to_ms(prevent_sleep_time));
846 
847 	spin_unlock_irqrestore(&ws->lock, flags);
848 
849 	return ret;
850 }
851 
852 /**
853  * wakeup_sources_stats_show - Print wakeup sources statistics information.
854  * @m: seq_file to print the statistics into.
855  */
856 static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
857 {
858 	struct wakeup_source *ws;
859 
860 	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
861 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
862 		"last_change\tprevent_suspend_time\n");
863 
864 	rcu_read_lock();
865 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
866 		print_wakeup_source_stats(m, ws);
867 	rcu_read_unlock();
868 
869 	return 0;
870 }
871 
872 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
873 {
874 	return single_open(file, wakeup_sources_stats_show, NULL);
875 }
876 
877 static const struct file_operations wakeup_sources_stats_fops = {
878 	.owner = THIS_MODULE,
879 	.open = wakeup_sources_stats_open,
880 	.read = seq_read,
881 	.llseek = seq_lseek,
882 	.release = single_release,
883 };
884 
885 static int __init wakeup_sources_debugfs_init(void)
886 {
887 	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
888 			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
889 	return 0;
890 }
891 
892 postcore_initcall(wakeup_sources_debugfs_init);
893