xref: /openbmc/linux/drivers/base/power/wakeup.c (revision 3b27d139)
1 /*
2  * drivers/base/power/wakeup.c - System wakeup events framework
3  *
4  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <linux/pm_wakeirq.h>
18 #include <trace/events/power.h>
19 
20 #include "power.h"
21 
22 /*
23  * If set, the suspend/hibernate code will abort transitions to a sleep state
24  * if wakeup events are registered during or immediately before the transition.
25  */
26 bool events_check_enabled __read_mostly;
27 
28 /* If set and the system is suspending, terminate the suspend. */
29 static bool pm_abort_suspend __read_mostly;
30 
31 /*
32  * Combined counters of registered wakeup events and wakeup events in progress.
33  * They need to be modified together atomically, so it's better to use one
34  * atomic variable to hold them both.
35  */
36 static atomic_t combined_event_count = ATOMIC_INIT(0);
37 
38 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
39 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
40 
41 static void split_counters(unsigned int *cnt, unsigned int *inpr)
42 {
43 	unsigned int comb = atomic_read(&combined_event_count);
44 
45 	*cnt = (comb >> IN_PROGRESS_BITS);
46 	*inpr = comb & MAX_IN_PROGRESS;
47 }
48 
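/*
 * Worked example (illustrative, not part of the original file): with a
 * 32-bit int, IN_PROGRESS_BITS is 16, so the low 16 bits of
 * combined_event_count hold the number of wakeup events currently in
 * progress and the high 16 bits hold the count of registered events:
 *
 *	// combined_event_count == 0x00050002
 *	split_counters(&cnt, &inpr);	// cnt == 5, inpr == 2
 *
 * Adding MAX_IN_PROGRESS (0xffff) to the combined value increments the
 * registered-events field and decrements the in-progress field in a single
 * atomic operation, which is how wakeup_source_deactivate() retires an
 * event below.
 */
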
49 /* A preserved old value of the events counter. */
50 static unsigned int saved_count;
51 
52 static DEFINE_SPINLOCK(events_lock);
53 
54 static void pm_wakeup_timer_fn(unsigned long data);
55 
56 static LIST_HEAD(wakeup_sources);
57 
58 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
59 
60 static struct wakeup_source deleted_ws = {
61 	.name = "deleted",
62 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
63 };
64 
65 /**
66  * wakeup_source_prepare - Prepare a new wakeup source for initialization.
67  * @ws: Wakeup source to prepare.
68  * @name: Pointer to the name of the new wakeup source.
69  *
70  * Callers must ensure that the @name string won't be freed when @ws is still in
71  * use.
72  */
73 void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
74 {
75 	if (ws) {
76 		memset(ws, 0, sizeof(*ws));
77 		ws->name = name;
78 	}
79 }
80 EXPORT_SYMBOL_GPL(wakeup_source_prepare);
81 
82 /**
83  * wakeup_source_create - Create a struct wakeup_source object.
84  * @name: Name of the new wakeup source.
85  */
86 struct wakeup_source *wakeup_source_create(const char *name)
87 {
88 	struct wakeup_source *ws;
89 
90 	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
91 	if (!ws)
92 		return NULL;
93 
94 	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
95 	return ws;
96 }
97 EXPORT_SYMBOL_GPL(wakeup_source_create);
98 
99 /**
100  * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
101  * @ws: Wakeup source to prepare for destruction.
102  *
103  * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
104  * be run in parallel with this function for the same wakeup source object.
105  */
106 void wakeup_source_drop(struct wakeup_source *ws)
107 {
108 	if (!ws)
109 		return;
110 
111 	del_timer_sync(&ws->timer);
112 	__pm_relax(ws);
113 }
114 EXPORT_SYMBOL_GPL(wakeup_source_drop);
115 
116 /*
117  * Record the statistics of a wakeup_source being deleted into the dummy wakeup_source.
118  */
119 static void wakeup_source_record(struct wakeup_source *ws)
120 {
121 	unsigned long flags;
122 
123 	spin_lock_irqsave(&deleted_ws.lock, flags);
124 
125 	if (ws->event_count) {
126 		deleted_ws.total_time =
127 			ktime_add(deleted_ws.total_time, ws->total_time);
128 		deleted_ws.prevent_sleep_time =
129 			ktime_add(deleted_ws.prevent_sleep_time,
130 				  ws->prevent_sleep_time);
131 		deleted_ws.max_time =
132 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
133 				deleted_ws.max_time : ws->max_time;
134 		deleted_ws.event_count += ws->event_count;
135 		deleted_ws.active_count += ws->active_count;
136 		deleted_ws.relax_count += ws->relax_count;
137 		deleted_ws.expire_count += ws->expire_count;
138 		deleted_ws.wakeup_count += ws->wakeup_count;
139 	}
140 
141 	spin_unlock_irqrestore(&deleted_ws.lock, flags);
142 }
143 
144 /**
145  * wakeup_source_destroy - Destroy a struct wakeup_source object.
146  * @ws: Wakeup source to destroy.
147  *
148  * Use only for wakeup source objects created with wakeup_source_create().
149  */
150 void wakeup_source_destroy(struct wakeup_source *ws)
151 {
152 	if (!ws)
153 		return;
154 
155 	wakeup_source_drop(ws);
156 	wakeup_source_record(ws);
157 	kfree(ws->name);
158 	kfree(ws);
159 }
160 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
161 
162 /**
163  * wakeup_source_add - Add given object to the list of wakeup sources.
164  * @ws: Wakeup source object to add to the list.
165  */
166 void wakeup_source_add(struct wakeup_source *ws)
167 {
168 	unsigned long flags;
169 
170 	if (WARN_ON(!ws))
171 		return;
172 
173 	spin_lock_init(&ws->lock);
174 	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
175 	ws->active = false;
176 	ws->last_time = ktime_get();
177 
178 	spin_lock_irqsave(&events_lock, flags);
179 	list_add_rcu(&ws->entry, &wakeup_sources);
180 	spin_unlock_irqrestore(&events_lock, flags);
181 }
182 EXPORT_SYMBOL_GPL(wakeup_source_add);
183 
184 /**
185  * wakeup_source_remove - Remove given object from the wakeup sources list.
186  * @ws: Wakeup source object to remove from the list.
187  */
188 void wakeup_source_remove(struct wakeup_source *ws)
189 {
190 	unsigned long flags;
191 
192 	if (WARN_ON(!ws))
193 		return;
194 
195 	spin_lock_irqsave(&events_lock, flags);
196 	list_del_rcu(&ws->entry);
197 	spin_unlock_irqrestore(&events_lock, flags);
198 	synchronize_rcu();
199 }
200 EXPORT_SYMBOL_GPL(wakeup_source_remove);
201 
202 /**
203  * wakeup_source_register - Create wakeup source and add it to the list.
204  * @name: Name of the wakeup source to register.
205  */
206 struct wakeup_source *wakeup_source_register(const char *name)
207 {
208 	struct wakeup_source *ws;
209 
210 	ws = wakeup_source_create(name);
211 	if (ws)
212 		wakeup_source_add(ws);
213 
214 	return ws;
215 }
216 EXPORT_SYMBOL_GPL(wakeup_source_register);
217 
218 /**
219  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
220  * @ws: Wakeup source object to unregister.
221  */
222 void wakeup_source_unregister(struct wakeup_source *ws)
223 {
224 	if (ws) {
225 		wakeup_source_remove(ws);
226 		wakeup_source_destroy(ws);
227 	}
228 }
229 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
230 
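/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * code that has no struct device to attach a wakeup source to can manage
 * one directly through the register/unregister API above:
 *
 *	struct wakeup_source *ws;
 *
 *	ws = wakeup_source_register("foo_events");	// hypothetical name
 *	if (!ws)
 *		return -ENOMEM;
 *	...
 *	__pm_wakeup_event(ws, 100);	// hold off suspend for ~100 ms
 *	...
 *	wakeup_source_unregister(ws);	// remove from the list and free it
 */
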
231 /**
232  * device_wakeup_attach - Attach a wakeup source object to a device object.
233  * @dev: Device to handle.
234  * @ws: Wakeup source object to attach to @dev.
235  *
236  * This causes @dev to be treated as a wakeup device.
237  */
238 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
239 {
240 	spin_lock_irq(&dev->power.lock);
241 	if (dev->power.wakeup) {
242 		spin_unlock_irq(&dev->power.lock);
243 		return -EEXIST;
244 	}
245 	dev->power.wakeup = ws;
246 	spin_unlock_irq(&dev->power.lock);
247 	return 0;
248 }
249 
250 /**
251  * device_wakeup_enable - Enable given device to be a wakeup source.
252  * @dev: Device to handle.
253  *
254  * Create a wakeup source object, register it and attach it to @dev.
255  */
256 int device_wakeup_enable(struct device *dev)
257 {
258 	struct wakeup_source *ws;
259 	int ret;
260 
261 	if (!dev || !dev->power.can_wakeup)
262 		return -EINVAL;
263 
264 	ws = wakeup_source_register(dev_name(dev));
265 	if (!ws)
266 		return -ENOMEM;
267 
268 	ret = device_wakeup_attach(dev, ws);
269 	if (ret)
270 		wakeup_source_unregister(ws);
271 
272 	return ret;
273 }
274 EXPORT_SYMBOL_GPL(device_wakeup_enable);
275 
276 /**
277  * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
278  * @dev: Device to handle
279  * @wakeirq: Device specific wakeirq entry
280  *
281  * Attach a device wakeirq to the wakeup source so the device
282  * wake IRQ can be configured automatically for suspend and
283  * resume.
284  *
285  * Call under the device's power.lock lock.
286  * Call under the device's power.lock lock.
 */
287 int device_wakeup_attach_irq(struct device *dev,
288 			     struct wake_irq *wakeirq)
289 {
290 	struct wakeup_source *ws;
291 
292 	ws = dev->power.wakeup;
293 	if (!ws) {
294 		dev_err(dev, "forgot to call device_init_wakeup?\n");
295 		return -EINVAL;
296 	}
297 
298 	if (ws->wakeirq)
299 		return -EEXIST;
300 
301 	ws->wakeirq = wakeirq;
302 	return 0;
303 }
304 
305 /**
306  * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
307  * @dev: Device to handle
308  *
309  * Removes a device wakeirq from the wakeup source.
310  *
311  * Call under the device's power.lock lock.
312  * Call under the device's power.lock lock.
 */
313 void device_wakeup_detach_irq(struct device *dev)
314 {
315 	struct wakeup_source *ws;
316 
317 	ws = dev->power.wakeup;
318 	if (ws)
319 		ws->wakeirq = NULL;
320 }
321 
322 /**
323  * device_wakeup_arm_wake_irqs(void)
324  *
325  * Iterates over the list of device wakeirqs to arm them.
326  */
327 void device_wakeup_arm_wake_irqs(void)
328 {
329 	struct wakeup_source *ws;
330 
331 	rcu_read_lock();
332 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
333 		if (ws->wakeirq)
334 			dev_pm_arm_wake_irq(ws->wakeirq);
335 	}
336 	rcu_read_unlock();
337 }
338 
339 /**
340  * device_wakeup_disarm_wake_irqs(void)
341  *
342  * Iterates over the list of device wakeirqs to disarm them.
343  */
344 void device_wakeup_disarm_wake_irqs(void)
345 {
346 	struct wakeup_source *ws;
347 
348 	rcu_read_lock();
349 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
350 		if (ws->wakeirq)
351 			dev_pm_disarm_wake_irq(ws->wakeirq);
352 	}
353 	rcu_read_unlock();
354 }
355 
356 /**
357  * device_wakeup_detach - Detach a device's wakeup source object from it.
358  * @dev: Device to detach the wakeup source object from.
359  *
360  * After it returns, @dev will not be treated as a wakeup device any more.
361  */
362 static struct wakeup_source *device_wakeup_detach(struct device *dev)
363 {
364 	struct wakeup_source *ws;
365 
366 	spin_lock_irq(&dev->power.lock);
367 	ws = dev->power.wakeup;
368 	dev->power.wakeup = NULL;
369 	spin_unlock_irq(&dev->power.lock);
370 	return ws;
371 }
372 
373 /**
374  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
375  * @dev: Device to handle.
376  *
377  * Detach the @dev's wakeup source object from it, unregister this wakeup source
378  * object and destroy it.
379  */
380 int device_wakeup_disable(struct device *dev)
381 {
382 	struct wakeup_source *ws;
383 
384 	if (!dev || !dev->power.can_wakeup)
385 		return -EINVAL;
386 
387 	ws = device_wakeup_detach(dev);
388 	if (ws)
389 		wakeup_source_unregister(ws);
390 
391 	return 0;
392 }
393 EXPORT_SYMBOL_GPL(device_wakeup_disable);
394 
395 /**
396  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
397  * @dev: Device to handle.
398  * @capable: Whether or not @dev is capable of waking up the system from sleep.
399  *
400  * If @capable is set, set the @dev's power.can_wakeup flag and add its
401  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
402  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
403  *
404  * This function may sleep, so it must not be called from any context where
405  * sleeping is not allowed.
406  */
407 void device_set_wakeup_capable(struct device *dev, bool capable)
408 {
409 	if (!!dev->power.can_wakeup == !!capable)
410 		return;
411 
412 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
413 		if (capable) {
414 			if (wakeup_sysfs_add(dev))
415 				return;
416 		} else {
417 			wakeup_sysfs_remove(dev);
418 		}
419 	}
420 	dev->power.can_wakeup = capable;
421 }
422 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
423 
424 /**
425  * device_init_wakeup - Device wakeup initialization.
426  * @dev: Device to handle.
427  * @enable: Whether or not to enable @dev as a wakeup device.
428  *
429  * By default, most devices should leave wakeup disabled.  The exceptions are
430  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
431  * possibly network interfaces, etc.  Also, devices that don't generate their
432  * own wakeup requests but merely forward requests from one bus to another
433  * (like PCI bridges) should have wakeup enabled by default.
434  */
435 int device_init_wakeup(struct device *dev, bool enable)
436 {
437 	int ret = 0;
438 
439 	if (!dev)
440 		return -EINVAL;
441 
442 	if (enable) {
443 		device_set_wakeup_capable(dev, true);
444 		ret = device_wakeup_enable(dev);
445 	} else {
446 		if (dev->power.can_wakeup)
447 			device_wakeup_disable(dev);
448 
449 		device_set_wakeup_capable(dev, false);
450 	}
451 
452 	return ret;
453 }
454 EXPORT_SYMBOL_GPL(device_init_wakeup);
455 
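/*
 * Illustrative sketch (hypothetical driver, not part of this file): a probe
 * routine for a device that is expected to wake the system, such as a power
 * button, would typically mark it wakeup-capable and enable it in one call:
 *
 *	static int foo_probe(struct platform_device *pdev)	// hypothetical
 *	{
 *		...
 *		device_init_wakeup(&pdev->dev, true);
 *		...
 *	}
 *
 * A device that should merely be capable of waking the system, with the
 * decision left to user space via the power/wakeup sysfs attribute, would
 * call device_set_wakeup_capable(&pdev->dev, true) instead.
 */
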
456 /**
457  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
458  * @dev: Device to handle.
459  */
460 int device_set_wakeup_enable(struct device *dev, bool enable)
461 {
462 	if (!dev || !dev->power.can_wakeup)
463 		return -EINVAL;
464 
465 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
466 }
467 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
468 
469 /**
470  * wakeup_source_not_registered - Check if the given wakeup source is not registered.
471  * @ws: Wakeup source to be validated.
472  */
473 static bool wakeup_source_not_registered(struct wakeup_source *ws)
474 {
475 	/*
476 	 * Use the timer struct to check whether the given source was initialized
477 	 * by wakeup_source_add().
478 	 */
479 	return ws->timer.function != pm_wakeup_timer_fn ||
480 		   ws->timer.data != (unsigned long)ws;
481 }
482 
483 /*
484  * The functions below use the observation that each wakeup event starts a
485  * period in which the system should not be suspended.  The moment this period
486  * will end depends on how the wakeup event is going to be processed after being
487  * detected and all of the possible cases can be divided into two distinct
488  * groups.
489  *
490  * First, a wakeup event may be detected by the same functional unit that will
491  * carry out the entire processing of it and possibly will pass it to user space
492  * for further processing.  In that case the functional unit that has detected
493  * the event may later "close" the "no suspend" period associated with it
494  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
495  * pm_relax(), balanced with each other, is supposed to be used in such
496  * situations.
497  *
498  * Second, a wakeup event may be detected by one functional unit and processed
499  * by another one.  In that case the unit that has detected it cannot really
500  * "close" the "no suspend" period associated with it, unless it knows in
501  * advance what's going to happen to the event during processing.  This
502  * knowledge, however, may not be available to it, so it can simply specify time
503  * to wait before the system can be suspended and pass it as the second
504  * argument of pm_wakeup_event().
505  *
506  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
507  * "no suspend" period will be ended either by the pm_relax(), or by the timer
508  * function executed when the timer expires, whichever comes first.
509  */
510 
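/*
 * Illustrative sketch of the second case above (hypothetical driver code,
 * not part of this file): an interrupt handler that only detects the event
 * and hands it off for processing elsewhere reports it with a grace period,
 * whereas code that completes the processing itself pairs pm_stay_awake()
 * with pm_relax():
 *
 *	static irqreturn_t foo_irq(int irq, void *data)		// hypothetical
 *	{
 *		struct device *dev = data;
 *
 *		// Allow up to 200 ms for another unit to pick the event up.
 *		pm_wakeup_event(dev, 200);
 *		return IRQ_HANDLED;
 *	}
 */
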
511 /**
512  * wakeup_source_activate - Mark given wakeup source as active.
513  * @ws: Wakeup source to handle.
514  *
515  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
516  * core of the event by incrementing the counter of wakeup events being
517  * processed.
518  */
519 static void wakeup_source_activate(struct wakeup_source *ws)
520 {
521 	unsigned int cec;
522 
523 	if (WARN_ONCE(wakeup_source_not_registered(ws),
524 			"unregistered wakeup source\n"))
525 		return;
526 
527 	/*
528 	 * An active wakeup source should bring the system
529 	 * out of the PM_SUSPEND_FREEZE state.
530 	 */
531 	freeze_wake();
532 
533 	ws->active = true;
534 	ws->active_count++;
535 	ws->last_time = ktime_get();
536 	if (ws->autosleep_enabled)
537 		ws->start_prevent_time = ws->last_time;
538 
539 	/* Increment the counter of events in progress. */
540 	cec = atomic_inc_return(&combined_event_count);
541 
542 	trace_wakeup_source_activate(ws->name, cec);
543 }
544 
545 /**
546  * wakeup_source_report_event - Report wakeup event using the given source.
547  * @ws: Wakeup source to report the event for.
548  */
549 static void wakeup_source_report_event(struct wakeup_source *ws)
550 {
551 	ws->event_count++;
552 	/* This is racy, but the counter is approximate anyway. */
553 	if (events_check_enabled)
554 		ws->wakeup_count++;
555 
556 	if (!ws->active)
557 		wakeup_source_activate(ws);
558 }
559 
560 /**
561  * __pm_stay_awake - Notify the PM core of a wakeup event.
562  * @ws: Wakeup source object associated with the source of the event.
563  *
564  * It is safe to call this function from interrupt context.
565  */
566 void __pm_stay_awake(struct wakeup_source *ws)
567 {
568 	unsigned long flags;
569 
570 	if (!ws)
571 		return;
572 
573 	spin_lock_irqsave(&ws->lock, flags);
574 
575 	wakeup_source_report_event(ws);
576 	del_timer(&ws->timer);
577 	ws->timer_expires = 0;
578 
579 	spin_unlock_irqrestore(&ws->lock, flags);
580 }
581 EXPORT_SYMBOL_GPL(__pm_stay_awake);
582 
583 /**
584  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
585  * @dev: Device the wakeup event is related to.
586  *
587  * Notify the PM core of a wakeup event (signaled by @dev) by calling
588  * __pm_stay_awake for the @dev's wakeup source object.
589  *
590  * Call this function after detecting a wakeup event if pm_relax() is going
591  * to be called directly after processing the event (and possibly passing it to
592  * user space for further processing).
593  */
594 void pm_stay_awake(struct device *dev)
595 {
596 	unsigned long flags;
597 
598 	if (!dev)
599 		return;
600 
601 	spin_lock_irqsave(&dev->power.lock, flags);
602 	__pm_stay_awake(dev->power.wakeup);
603 	spin_unlock_irqrestore(&dev->power.lock, flags);
604 }
605 EXPORT_SYMBOL_GPL(pm_stay_awake);
606 
607 #ifdef CONFIG_PM_AUTOSLEEP
608 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
609 {
610 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
611 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
612 }
613 #else
614 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
615 					     ktime_t now) {}
616 #endif
617 
618 /**
619  * wakeup_source_deactivate - Mark given wakeup source as inactive.
620  * @ws: Wakeup source to handle.
621  *
622  * Update the @ws' statistics and notify the PM core that the wakeup source has
623  * become inactive by decrementing the counter of wakeup events being processed
624  * and incrementing the counter of registered wakeup events.
625  */
626 static void wakeup_source_deactivate(struct wakeup_source *ws)
627 {
628 	unsigned int cnt, inpr, cec;
629 	ktime_t duration;
630 	ktime_t now;
631 
632 	ws->relax_count++;
633 	/*
634 	 * __pm_relax() may be called directly or from a timer function.
635 	 * If it is called directly right after the timer function has been
636 	 * started, but before the timer function calls __pm_relax(), it is
637 	 * possible that __pm_stay_awake() will be called in the meantime and
638 	 * will set ws->active.  Then, ws->active may be cleared immediately
639 	 * by the __pm_relax() called from the timer function, but in such a
640 	 * case ws->relax_count will be different from ws->active_count.
641 	 */
642 	if (ws->relax_count != ws->active_count) {
643 		ws->relax_count--;
644 		return;
645 	}
646 
647 	ws->active = false;
648 
649 	now = ktime_get();
650 	duration = ktime_sub(now, ws->last_time);
651 	ws->total_time = ktime_add(ws->total_time, duration);
652 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
653 		ws->max_time = duration;
654 
655 	ws->last_time = now;
656 	del_timer(&ws->timer);
657 	ws->timer_expires = 0;
658 
659 	if (ws->autosleep_enabled)
660 		update_prevent_sleep_time(ws, now);
661 
662 	/*
663 	 * Increment the counter of registered wakeup events and decrement the
664 	 * counter of wakeup events in progress simultaneously.
665 	 */
666 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
667 	trace_wakeup_source_deactivate(ws->name, cec);
668 
669 	split_counters(&cnt, &inpr);
670 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
671 		wake_up(&wakeup_count_wait_queue);
672 }
673 
674 /**
675  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
676  * @ws: Wakeup source object associated with the source of the event.
677  *
678  * Call this function for wakeup events whose processing started with calling
679  * __pm_stay_awake().
680  *
681  * It is safe to call it from interrupt context.
682  */
683 void __pm_relax(struct wakeup_source *ws)
684 {
685 	unsigned long flags;
686 
687 	if (!ws)
688 		return;
689 
690 	spin_lock_irqsave(&ws->lock, flags);
691 	if (ws->active)
692 		wakeup_source_deactivate(ws);
693 	spin_unlock_irqrestore(&ws->lock, flags);
694 }
695 EXPORT_SYMBOL_GPL(__pm_relax);
696 
697 /**
698  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
699  * @dev: Device that signaled the event.
700  *
701  * Execute __pm_relax() for the @dev's wakeup source object.
702  */
703 void pm_relax(struct device *dev)
704 {
705 	unsigned long flags;
706 
707 	if (!dev)
708 		return;
709 
710 	spin_lock_irqsave(&dev->power.lock, flags);
711 	__pm_relax(dev->power.wakeup);
712 	spin_unlock_irqrestore(&dev->power.lock, flags);
713 }
714 EXPORT_SYMBOL_GPL(pm_relax);
715 
716 /**
717  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
718  * @data: Address of the wakeup source object associated with the event source.
719  *
720  * Call wakeup_source_deactivate() for the wakeup source whose address is stored
721  * in @data if it is currently active and its timer has not been canceled and
722  * the expiration time of the timer is not in the future.
723  */
724 static void pm_wakeup_timer_fn(unsigned long data)
725 {
726 	struct wakeup_source *ws = (struct wakeup_source *)data;
727 	unsigned long flags;
728 
729 	spin_lock_irqsave(&ws->lock, flags);
730 
731 	if (ws->active && ws->timer_expires
732 	    && time_after_eq(jiffies, ws->timer_expires)) {
733 		wakeup_source_deactivate(ws);
734 		ws->expire_count++;
735 	}
736 
737 	spin_unlock_irqrestore(&ws->lock, flags);
738 }
739 
740 /**
741  * __pm_wakeup_event - Notify the PM core of a wakeup event.
742  * @ws: Wakeup source object associated with the event source.
743  * @msec: Anticipated event processing time (in milliseconds).
744  *
745  * Notify the PM core of a wakeup event whose source is @ws that will take
746  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
747  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
748  * execute pm_wakeup_timer_fn() in the future.
749  *
750  * It is safe to call this function from interrupt context.
751  */
752 void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
753 {
754 	unsigned long flags;
755 	unsigned long expires;
756 
757 	if (!ws)
758 		return;
759 
760 	spin_lock_irqsave(&ws->lock, flags);
761 
762 	wakeup_source_report_event(ws);
763 
764 	if (!msec) {
765 		wakeup_source_deactivate(ws);
766 		goto unlock;
767 	}
768 
769 	expires = jiffies + msecs_to_jiffies(msec);
770 	if (!expires)
771 		expires = 1;
772 
773 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
774 		mod_timer(&ws->timer, expires);
775 		ws->timer_expires = expires;
776 	}
777 
778  unlock:
779 	spin_unlock_irqrestore(&ws->lock, flags);
780 }
781 EXPORT_SYMBOL_GPL(__pm_wakeup_event);
782 
783 
784 /**
785  * pm_wakeup_event - Notify the PM core of a wakeup event.
786  * @dev: Device the wakeup event is related to.
787  * @msec: Anticipated event processing time (in milliseconds).
788  *
789  * Call __pm_wakeup_event() for the @dev's wakeup source object.
790  */
791 void pm_wakeup_event(struct device *dev, unsigned int msec)
792 {
793 	unsigned long flags;
794 
795 	if (!dev)
796 		return;
797 
798 	spin_lock_irqsave(&dev->power.lock, flags);
799 	__pm_wakeup_event(dev->power.wakeup, msec);
800 	spin_unlock_irqrestore(&dev->power.lock, flags);
801 }
802 EXPORT_SYMBOL_GPL(pm_wakeup_event);
803 
804 void pm_print_active_wakeup_sources(void)
805 {
806 	struct wakeup_source *ws;
807 	int active = 0;
808 	struct wakeup_source *last_activity_ws = NULL;
809 
810 	rcu_read_lock();
811 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
812 		if (ws->active) {
813 			pr_info("active wakeup source: %s\n", ws->name);
814 			active = 1;
815 		} else if (!active &&
816 			   (!last_activity_ws ||
817 			    ktime_to_ns(ws->last_time) >
818 			    ktime_to_ns(last_activity_ws->last_time))) {
819 			last_activity_ws = ws;
820 		}
821 	}
822 
823 	if (!active && last_activity_ws)
824 		pr_info("last active wakeup source: %s\n",
825 			last_activity_ws->name);
826 	rcu_read_unlock();
827 }
828 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
829 
830 /**
831  * pm_wakeup_pending - Check if power transition in progress should be aborted.
832  *
833  * Compare the current number of registered wakeup events with its preserved
834  * value from the past and return true if new wakeup events have been registered
835  * since the old value was stored.  Also return true if the current number of
836  * wakeup events being processed is different from zero.
837  */
838 bool pm_wakeup_pending(void)
839 {
840 	unsigned long flags;
841 	bool ret = false;
842 
843 	spin_lock_irqsave(&events_lock, flags);
844 	if (events_check_enabled) {
845 		unsigned int cnt, inpr;
846 
847 		split_counters(&cnt, &inpr);
848 		ret = (cnt != saved_count || inpr > 0);
849 		events_check_enabled = !ret;
850 	}
851 	spin_unlock_irqrestore(&events_lock, flags);
852 
853 	if (ret) {
854 		pr_info("PM: Wakeup pending, aborting suspend\n");
855 		pm_print_active_wakeup_sources();
856 	}
857 
858 	return ret || pm_abort_suspend;
859 }
860 
861 void pm_system_wakeup(void)
862 {
863 	pm_abort_suspend = true;
864 	freeze_wake();
865 }
866 EXPORT_SYMBOL_GPL(pm_system_wakeup);
867 
868 void pm_wakeup_clear(void)
869 {
870 	pm_abort_suspend = false;
871 }
872 
873 /**
874  * pm_get_wakeup_count - Read the number of registered wakeup events.
875  * @count: Address to store the value at.
876  * @block: Whether or not to block.
877  *
878  * Store the number of registered wakeup events at the address in @count.  If
879  * @block is set, block until the current number of wakeup events being
880  * processed is zero.
881  *
882  * Return 'false' if the current number of wakeup events being processed is
883  * nonzero.  Otherwise return 'true'.
884  */
885 bool pm_get_wakeup_count(unsigned int *count, bool block)
886 {
887 	unsigned int cnt, inpr;
888 
889 	if (block) {
890 		DEFINE_WAIT(wait);
891 
892 		for (;;) {
893 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
894 					TASK_INTERRUPTIBLE);
895 			split_counters(&cnt, &inpr);
896 			if (inpr == 0 || signal_pending(current))
897 				break;
898 
899 			schedule();
900 		}
901 		finish_wait(&wakeup_count_wait_queue, &wait);
902 	}
903 
904 	split_counters(&cnt, &inpr);
905 	*count = cnt;
906 	return !inpr;
907 }
908 
909 /**
910  * pm_save_wakeup_count - Save the current number of registered wakeup events.
911  * @count: Value to compare with the current number of registered wakeup events.
912  *
913  * If @count is equal to the current number of registered wakeup events and the
914  * current number of wakeup events being processed is zero, store @count as the
915  * old number of registered wakeup events for pm_wakeup_pending(), enable
916  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
917  * detection and return 'false'.
918  */
919 bool pm_save_wakeup_count(unsigned int count)
920 {
921 	unsigned int cnt, inpr;
922 	unsigned long flags;
923 
924 	events_check_enabled = false;
925 	spin_lock_irqsave(&events_lock, flags);
926 	split_counters(&cnt, &inpr);
927 	if (cnt == count && inpr == 0) {
928 		saved_count = count;
929 		events_check_enabled = true;
930 	}
931 	spin_unlock_irqrestore(&events_lock, flags);
932 	return events_check_enabled;
933 }
934 
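/*
 * Illustrative note (assumption about the caller, not part of this file):
 * the /sys/power/wakeup_count interface is the usual consumer of the two
 * functions above.  A user space power manager reads the current count,
 * writes it back, and only then initiates suspend; if wakeup events are
 * registered in between, the write (and hence the suspend attempt) fails:
 *
 *	read  /sys/power/wakeup_count		-> pm_get_wakeup_count()
 *	write /sys/power/wakeup_count		-> pm_save_wakeup_count()
 *	write "mem" to /sys/power/state		-> aborted if new wakeup
 *						   events were registered
 */
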
935 #ifdef CONFIG_PM_AUTOSLEEP
936 /**
937  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
938  * @set: Whether to set or to clear the autosleep_enabled flags.
939  */
940 void pm_wakep_autosleep_enabled(bool set)
941 {
942 	struct wakeup_source *ws;
943 	ktime_t now = ktime_get();
944 
945 	rcu_read_lock();
946 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
947 		spin_lock_irq(&ws->lock);
948 		if (ws->autosleep_enabled != set) {
949 			ws->autosleep_enabled = set;
950 			if (ws->active) {
951 				if (set)
952 					ws->start_prevent_time = now;
953 				else
954 					update_prevent_sleep_time(ws, now);
955 			}
956 		}
957 		spin_unlock_irq(&ws->lock);
958 	}
959 	rcu_read_unlock();
960 }
961 #endif /* CONFIG_PM_AUTOSLEEP */
962 
963 static struct dentry *wakeup_sources_stats_dentry;
964 
965 /**
966  * print_wakeup_source_stats - Print wakeup source statistics information.
967  * @m: seq_file to print the statistics into.
968  * @ws: Wakeup source object to print the statistics for.
969  */
970 static int print_wakeup_source_stats(struct seq_file *m,
971 				     struct wakeup_source *ws)
972 {
973 	unsigned long flags;
974 	ktime_t total_time;
975 	ktime_t max_time;
976 	unsigned long active_count;
977 	ktime_t active_time;
978 	ktime_t prevent_sleep_time;
979 
980 	spin_lock_irqsave(&ws->lock, flags);
981 
982 	total_time = ws->total_time;
983 	max_time = ws->max_time;
984 	prevent_sleep_time = ws->prevent_sleep_time;
985 	active_count = ws->active_count;
986 	if (ws->active) {
987 		ktime_t now = ktime_get();
988 
989 		active_time = ktime_sub(now, ws->last_time);
990 		total_time = ktime_add(total_time, active_time);
991 		if (active_time.tv64 > max_time.tv64)
992 			max_time = active_time;
993 
994 		if (ws->autosleep_enabled)
995 			prevent_sleep_time = ktime_add(prevent_sleep_time,
996 				ktime_sub(now, ws->start_prevent_time));
997 	} else {
998 		active_time = ktime_set(0, 0);
999 	}
1000 
1001 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1002 		   ws->name, active_count, ws->event_count,
1003 		   ws->wakeup_count, ws->expire_count,
1004 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1005 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1006 		   ktime_to_ms(prevent_sleep_time));
1007 
1008 	spin_unlock_irqrestore(&ws->lock, flags);
1009 
1010 	return 0;
1011 }
1012 
1013 /**
1014  * wakeup_sources_stats_show - Print wakeup sources statistics information.
1015  * @m: seq_file to print the statistics into.
1016  */
1017 static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
1018 {
1019 	struct wakeup_source *ws;
1020 
1021 	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1022 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
1023 		"last_change\tprevent_suspend_time\n");
1024 
1025 	rcu_read_lock();
1026 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
1027 		print_wakeup_source_stats(m, ws);
1028 	rcu_read_unlock();
1029 
1030 	print_wakeup_source_stats(m, &deleted_ws);
1031 
1032 	return 0;
1033 }
1034 
1035 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1036 {
1037 	return single_open(file, wakeup_sources_stats_show, NULL);
1038 }
1039 
1040 static const struct file_operations wakeup_sources_stats_fops = {
1041 	.owner = THIS_MODULE,
1042 	.open = wakeup_sources_stats_open,
1043 	.read = seq_read,
1044 	.llseek = seq_lseek,
1045 	.release = single_release,
1046 };
1047 
1048 static int __init wakeup_sources_debugfs_init(void)
1049 {
1050 	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
1051 			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
1052 	return 0;
1053 }
1054 
1055 postcore_initcall(wakeup_sources_debugfs_init);
1056
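/*
 * Illustrative note (not part of this file): with debugfs mounted in the
 * usual place, the file created above appears as
 * /sys/kernel/debug/wakeup_sources and contains one row per registered
 * wakeup source, plus the "deleted" aggregate, under the column headers
 * printed by wakeup_sources_stats_show():
 *
 *	name  active_count  event_count  wakeup_count  expire_count
 *	active_since  total_time  max_time  last_change  prevent_suspend_time
 */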