xref: /openbmc/linux/drivers/base/power/main.c (revision 8dda2eac)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/cpuidle.h>
36 #include <linux/devfreq.h>
37 #include <linux/timer.h>
38 
39 #include "../base.h"
40 #include "power.h"
41 
/* Signature shared by all device PM callbacks invoked by this file. */
typedef int (*pm_callback_t)(struct device *);

/*
 * RCU walk of a device-link list, with lockdep annotation asserting that
 * the caller holds the device links read lock (see device_links_read_lock()).
 */
#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

/* Devices move between these lists as they progress through the phases. */
LIST_HEAD(dpm_list);			/* All PM-managed devices. */
static LIST_HEAD(dpm_prepared_list);	/* Passed the "prepare" phase. */
static LIST_HEAD(dpm_suspended_list);	/* Passed the "suspend" phase. */
static LIST_HEAD(dpm_late_early_list);	/* Passed the "late suspend" phase. */
static LIST_HEAD(dpm_noirq_list);	/* Passed the "noirq suspend" phase. */

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);	/* Protects all of the lists above. */
static pm_message_t pm_transition;	/* System transition currently under way. */

/* First error reported by an async suspend/resume thread, if any. */
static int async_error;
/* Map a PM_EVENT_* code to a human-readable verb for log messages. */
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
93 
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	/*
	 * Start the completion in the "done" state so that dpm_wait() on a
	 * freshly registered device does not block.
	 */
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
109 
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
117 
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
125 
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	/*
	 * Registering a child below a parent that has already gone through
	 * the "prepare" phase is suspicious during a sleep transition.
	 */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	/* Tail insertion keeps dpm_list in parents-before-children order. */
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
147 
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	/* Tear down wakeup and runtime PM state outside of dpm_list_mtx. */
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
168 
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 *
 * NOTE(review): no locking here — presumably the caller holds the list lock
 * via device_pm_lock(); confirm against callers.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
182 
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 *
 * NOTE(review): no locking here — presumably the caller holds the list lock
 * via device_pm_lock(); confirm against callers.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
196 
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 *
 * NOTE(review): no locking here — presumably the caller holds the list lock
 * via device_pm_lock(); confirm against callers.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
207 
208 static ktime_t initcall_debug_start(struct device *dev, void *cb)
209 {
210 	if (!pm_print_times_enabled)
211 		return 0;
212 
213 	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
214 		 task_pid_nr(current),
215 		 dev->parent ? dev_name(dev->parent) : "none");
216 	return ktime_get();
217 }
218 
219 static void initcall_debug_report(struct device *dev, ktime_t calltime,
220 				  void *cb, int error)
221 {
222 	ktime_t rettime;
223 
224 	if (!pm_print_times_enabled)
225 		return;
226 
227 	rettime = ktime_get();
228 	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
229 		 (unsigned long long)ktime_us_delta(rettime, calltime));
230 }
231 
232 /**
233  * dpm_wait - Wait for a PM operation to complete.
234  * @dev: Device to wait for.
235  * @async: If unset, wait only if the device's power.async_suspend flag is set.
236  */
237 static void dpm_wait(struct device *dev, bool async)
238 {
239 	if (!dev)
240 		return;
241 
242 	if (async || (pm_async_enabled && dev->power.async_suspend))
243 		wait_for_completion(&dev->power.completion);
244 }
245 
246 static int dpm_wait_fn(struct device *dev, void *async_ptr)
247 {
248 	dpm_wait(dev, *((bool *)async_ptr));
249 	return 0;
250 }
251 
/* Wait for every child of @dev to complete its in-flight PM operation. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
256 
/* Wait for all non-dormant suppliers of @dev to complete their PM operations. */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
277 
/*
 * Wait for the parent and suppliers of @dev to finish their PM operations.
 * Returns false if @dev was unregistered in the meantime, in which case the
 * caller must not attempt to resume it.
 */
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	/* dpm_wait()/put_device() both tolerate a NULL parent. */
	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
311 
/* Wait for all non-dormant consumers of @dev to complete their PM operations. */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
334 
/* Wait for everything that must suspend before @dev: children and consumers. */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
340 
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Returns NULL when @ops has no callback for @state (or when the relevant
 * config option is disabled, making the corresponding cases unreachable).
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
371 
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "late suspend" / "early resume" phase callback; returns NULL
 * if @ops has none for @state.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
405 
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "noirq" phase callback; returns NULL if @ops has none for
 * @state.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
439 
440 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
441 {
442 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
443 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
444 		", may wakeup" : "", dev->power.driver_flags);
445 }
446 
/* Report a failed PM callback for @dev at error severity. */
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}
453 
454 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
455 			  const char *info)
456 {
457 	ktime_t calltime;
458 	u64 usecs64;
459 	int usecs;
460 
461 	calltime = ktime_get();
462 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
463 	do_div(usecs64, NSEC_PER_USEC);
464 	usecs = usecs64;
465 	if (usecs == 0)
466 		usecs = 1;
467 
468 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
469 		  info ?: "", info ? " " : "", pm_verb(state.event),
470 		  error ? "aborted" : "complete",
471 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
472 }
473 
/*
 * Invoke a single device PM callback with tracing and optional timing
 * instrumentation around it.  A NULL callback is a successful no-op.
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	/* Keep the start/end trace events tightly around the callback itself. */
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
495 
#ifdef CONFIG_DPM_WATCHDOG
/* Per-callback watchdog state; lives on the stack of the suspending task. */
struct dpm_watchdog {
	struct device		*dev;	/* Device whose callback is timed. */
	struct task_struct	*tsk;	/* Task running the callback. */
	struct timer_list	timer;	/* Fires when the callback times out. */
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	/* Dump the stuck task's stack before panicking. */
	show_stack(wd->tsk, NULL, KERN_EMERG);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	/* The timer is on the stack, so it must be fully torn down here. */
	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
/* Watchdog disabled: all of the above compiles away to nothing. */
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
558 
559 /*------------------------- Resume routines -------------------------*/
560 
561 /**
562  * dev_pm_skip_resume - System-wide device resume optimization check.
563  * @dev: Target device.
564  *
565  * Return:
566  * - %false if the transition under way is RESTORE.
567  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
568  * - The logical negation of %power.must_resume otherwise (that is, when the
569  *   transition under way is RESUME).
570  */
571 bool dev_pm_skip_resume(struct device *dev)
572 {
573 	if (pm_transition.event == PM_EVENT_RESTORE)
574 		return false;
575 
576 	if (pm_transition.event == PM_EVENT_THAW)
577 		return dev_pm_skip_suspend(dev);
578 
579 	return !dev->power.must_resume;
580 }
581 
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Syscore and direct-complete devices take no noirq callbacks. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	/* If the device went away while waiting, there is nothing to resume. */
	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	/* Highest-priority middle layer wins: domain, type, class, then bus. */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	/* No middle-layer callback: fall back to the driver's own. */
	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	/* Unblock anyone waiting on this device regardless of the outcome. */
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
661 
662 static bool is_async(struct device *dev)
663 {
664 	return dev->power.async_suspend && pm_async_enabled
665 		&& !pm_trace_is_enabled();
666 }
667 
668 static bool dpm_async_fn(struct device *dev, async_func_t func)
669 {
670 	reinit_completion(&dev->power.completion);
671 
672 	if (is_async(dev)) {
673 		get_device(dev);
674 		async_schedule_dev(func, dev);
675 		return true;
676 	}
677 
678 	return false;
679 }
680 
681 static void async_resume_noirq(void *data, async_cookie_t cookie)
682 {
683 	struct device *dev = (struct device *)data;
684 	int error;
685 
686 	error = device_resume_noirq(dev, pm_transition, true);
687 	if (error)
688 		pm_dev_err(dev, pm_transition, " async", error);
689 
690 	put_device(dev);
691 }
692 
/* Run the "noirq" resume phase for every device in dpm_noirq_list. */
static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		/* Hold a reference; the callback may unregister the device. */
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		/* Callbacks may sleep; never invoke them under dpm_list_mtx. */
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async device_resume_noirq() invocations to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
736 
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	/* Only after the noirq callbacks ran may device IRQs be re-enabled. */
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();

	cpuidle_resume();
}
753 
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Syscore and direct-complete devices take no early callbacks. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	/* If the device went away while waiting, there is nothing to resume. */
	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	/* Highest-priority middle layer wins: domain, type, class, then bus. */
	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	/* No middle-layer callback: fall back to the driver's own. */
	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	/* Re-enable runtime PM (disabled during the late suspend phase). */
	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
817 
818 static void async_resume_early(void *data, async_cookie_t cookie)
819 {
820 	struct device *dev = (struct device *)data;
821 	int error;
822 
823 	error = device_resume_early(dev, pm_transition, true);
824 	if (error)
825 		pm_dev_err(dev, pm_transition, " async", error);
826 
827 	put_device(dev);
828 }
829 
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		/* Hold a reference; the callback may unregister the device. */
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/* Callbacks may sleep; never invoke them under dpm_list_mtx. */
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async device_resume_early() invocations to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
876 
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 *
 * The noirq phase must run before the early phase; do not reorder.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
887 
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	/* If the device went away while waiting, there is nothing to resume. */
	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	/* Arm the watchdog and hold the device lock around the callback. */
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	/* Highest-priority middle layer wins: domain, type, class, then bus. */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			/* Legacy bus callback: skip the driver fallback. */
			goto End;
		}
	}

 Driver:
	/* Fall back to the driver's callback if the middle layer had none. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	/* Unblock anyone waiting on this device regardless of the outcome. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
978 
979 static void async_resume(void *data, async_cookie_t cookie)
980 {
981 	struct device *dev = (struct device *)data;
982 	int error;
983 
984 	error = device_resume(dev, pm_transition, true);
985 	if (error)
986 		pm_dev_err(dev, pm_transition, " async", error);
987 	put_device(dev);
988 }
989 
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Kick off all async-capable devices first so they run in parallel. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		/* Hold a reference; the callback may unregister the device. */
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			/* Callbacks may sleep; drop dpm_list_mtx around them. */
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		/* The callback may have removed the device from the list. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async device_resume() invocations to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
1042 
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	/* Highest-priority middle layer wins: domain, type, class, then bus. */
	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	/* Fall back to the driver's callback if the middle layer had none. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	/*
	 * NOTE(review): this put presumably balances a pm_runtime_get done in
	 * the prepare phase (not visible in this chunk) — confirm.
	 */
	pm_runtime_put(dev);
}
1086 
1087 /**
1088  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1089  * @state: PM transition of the system being carried out.
1090  *
1091  * Execute the ->complete() callbacks for all devices whose PM status is not
1092  * DPM_ON (this allows new devices to be registered).
1093  */
1094 void dpm_complete(pm_message_t state)
1095 {
1096 	struct list_head list;
1097 
1098 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1099 	might_sleep();
1100 
1101 	INIT_LIST_HEAD(&list);
1102 	mutex_lock(&dpm_list_mtx);
1103 	while (!list_empty(&dpm_prepared_list)) {
1104 		struct device *dev = to_device(dpm_prepared_list.prev);
1105 
1106 		get_device(dev);
1107 		dev->power.is_prepared = false;
1108 		list_move(&dev->power.entry, &list);
1109 		mutex_unlock(&dpm_list_mtx);
1110 
1111 		trace_device_pm_callback_start(dev, "", state.event);
1112 		device_complete(dev, state);
1113 		trace_device_pm_callback_end(dev, 0);
1114 
1115 		mutex_lock(&dpm_list_mtx);
1116 		put_device(dev);
1117 	}
1118 	list_splice(&list, &dpm_list);
1119 	mutex_unlock(&dpm_list_mtx);
1120 
1121 	/* Allow device probing and trigger re-probing of deferred devices */
1122 	device_unblock_probing();
1123 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1124 }
1125 
1126 /**
1127  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1128  * @state: PM transition of the system being carried out.
1129  *
1130  * Execute "resume" callbacks for all devices and complete the PM transition of
1131  * the system.
1132  */
1133 void dpm_resume_end(pm_message_t state)
1134 {
1135 	dpm_resume(state);
1136 	dpm_complete(state);
1137 }
1138 EXPORT_SYMBOL_GPL(dpm_resume_end);
1139 
1140 
1141 /*------------------------- Suspend routines -------------------------*/
1142 
1143 /**
1144  * resume_event - Return a "resume" message for given "suspend" sleep state.
1145  * @sleep_state: PM message representing a sleep state.
1146  *
1147  * Return a PM message representing the resume event corresponding to given
1148  * sleep state.
1149  */
1150 static pm_message_t resume_event(pm_message_t sleep_state)
1151 {
1152 	switch (sleep_state.event) {
1153 	case PM_EVENT_SUSPEND:
1154 		return PMSG_RESUME;
1155 	case PM_EVENT_FREEZE:
1156 	case PM_EVENT_QUIESCE:
1157 		return PMSG_RECOVER;
1158 	case PM_EVENT_HIBERNATE:
1159 		return PMSG_RESTORE;
1160 	}
1161 	return PMSG_ON;
1162 }
1163 
/*
 * dpm_superior_set_must_resume - Propagate the "must resume" flag upwards.
 * @dev: Device whose parent and link suppliers must be resumed.
 *
 * Mark the parent of @dev and all of its supplier devices as having to be
 * resumed during the subsequent resume phase.
 */
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	/* Walk the supplier links under the device-links read lock. */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}
1179 
1180 /**
1181  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1182  * @dev: Device to handle.
1183  * @state: PM transition of the system being carried out.
1184  * @async: If true, the device is being suspended asynchronously.
1185  *
1186  * The driver of @dev will not receive interrupts while this function is being
1187  * executed.
1188  */
1189 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1190 {
1191 	pm_callback_t callback = NULL;
1192 	const char *info = NULL;
1193 	int error = 0;
1194 
1195 	TRACE_DEVICE(dev);
1196 	TRACE_SUSPEND(0);
1197 
1198 	dpm_wait_for_subordinate(dev, async);
1199 
1200 	if (async_error)
1201 		goto Complete;
1202 
1203 	if (dev->power.syscore || dev->power.direct_complete)
1204 		goto Complete;
1205 
1206 	if (dev->pm_domain) {
1207 		info = "noirq power domain ";
1208 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1209 	} else if (dev->type && dev->type->pm) {
1210 		info = "noirq type ";
1211 		callback = pm_noirq_op(dev->type->pm, state);
1212 	} else if (dev->class && dev->class->pm) {
1213 		info = "noirq class ";
1214 		callback = pm_noirq_op(dev->class->pm, state);
1215 	} else if (dev->bus && dev->bus->pm) {
1216 		info = "noirq bus ";
1217 		callback = pm_noirq_op(dev->bus->pm, state);
1218 	}
1219 	if (callback)
1220 		goto Run;
1221 
1222 	if (dev_pm_skip_suspend(dev))
1223 		goto Skip;
1224 
1225 	if (dev->driver && dev->driver->pm) {
1226 		info = "noirq driver ";
1227 		callback = pm_noirq_op(dev->driver->pm, state);
1228 	}
1229 
1230 Run:
1231 	error = dpm_run_callback(callback, dev, state, info);
1232 	if (error) {
1233 		async_error = error;
1234 		goto Complete;
1235 	}
1236 
1237 Skip:
1238 	dev->power.is_noirq_suspended = true;
1239 
1240 	/*
1241 	 * Skipping the resume of devices that were in use right before the
1242 	 * system suspend (as indicated by their PM-runtime usage counters)
1243 	 * would be suboptimal.  Also resume them if doing that is not allowed
1244 	 * to be skipped.
1245 	 */
1246 	if (atomic_read(&dev->power.usage_count) > 1 ||
1247 	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1248 	      dev->power.may_skip_resume))
1249 		dev->power.must_resume = true;
1250 
1251 	if (dev->power.must_resume)
1252 		dpm_superior_set_must_resume(dev);
1253 
1254 Complete:
1255 	complete_all(&dev->power.completion);
1256 	TRACE_SUSPEND(error);
1257 	return error;
1258 }
1259 
1260 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1261 {
1262 	struct device *dev = (struct device *)data;
1263 	int error;
1264 
1265 	error = __device_suspend_noirq(dev, pm_transition, true);
1266 	if (error) {
1267 		dpm_save_failed_dev(dev_name(dev));
1268 		pm_dev_err(dev, pm_transition, " async", error);
1269 	}
1270 
1271 	put_device(dev);
1272 }
1273 
1274 static int device_suspend_noirq(struct device *dev)
1275 {
1276 	if (dpm_async_fn(dev, async_suspend_noirq))
1277 		return 0;
1278 
1279 	return __device_suspend_noirq(dev, pm_transition, false);
1280 }
1281 
/* Run the "noirq suspend" phase for every device on dpm_late_early_list. */
static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Walk the list from the tail (reverse registration order). */
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		/* Drop the list lock while the device is being handled. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		/* Stop submitting devices once an async suspend has failed. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all asynchronous suspends queued above to finish. */
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
1327 
1328 /**
1329  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1330  * @state: PM transition of the system being carried out.
1331  *
1332  * Prevent device drivers' interrupt handlers from being called and invoke
1333  * "noirq" suspend callbacks for all non-sysdev devices.
1334  */
1335 int dpm_suspend_noirq(pm_message_t state)
1336 {
1337 	int ret;
1338 
1339 	cpuidle_pause();
1340 
1341 	device_wakeup_arm_wake_irqs();
1342 	suspend_device_irqs();
1343 
1344 	ret = dpm_noirq_suspend_devices(state);
1345 	if (ret)
1346 		dpm_resume_noirq(resume_event(state));
1347 
1348 	return ret;
1349 }
1350 
1351 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1352 {
1353 	struct device *parent = dev->parent;
1354 
1355 	if (!parent)
1356 		return;
1357 
1358 	spin_lock_irq(&parent->power.lock);
1359 
1360 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1361 		parent->power.wakeup_path = true;
1362 
1363 	spin_unlock_irq(&parent->power.lock);
1364 }
1365 
1366 /**
1367  * __device_suspend_late - Execute a "late suspend" callback for given device.
1368  * @dev: Device to handle.
1369  * @state: PM transition of the system being carried out.
1370  * @async: If true, the device is being suspended asynchronously.
1371  *
1372  * Runtime PM is disabled for @dev while this function is being executed.
1373  */
1374 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1375 {
1376 	pm_callback_t callback = NULL;
1377 	const char *info = NULL;
1378 	int error = 0;
1379 
1380 	TRACE_DEVICE(dev);
1381 	TRACE_SUSPEND(0);
1382 
1383 	__pm_runtime_disable(dev, false);
1384 
1385 	dpm_wait_for_subordinate(dev, async);
1386 
1387 	if (async_error)
1388 		goto Complete;
1389 
1390 	if (pm_wakeup_pending()) {
1391 		async_error = -EBUSY;
1392 		goto Complete;
1393 	}
1394 
1395 	if (dev->power.syscore || dev->power.direct_complete)
1396 		goto Complete;
1397 
1398 	if (dev->pm_domain) {
1399 		info = "late power domain ";
1400 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1401 	} else if (dev->type && dev->type->pm) {
1402 		info = "late type ";
1403 		callback = pm_late_early_op(dev->type->pm, state);
1404 	} else if (dev->class && dev->class->pm) {
1405 		info = "late class ";
1406 		callback = pm_late_early_op(dev->class->pm, state);
1407 	} else if (dev->bus && dev->bus->pm) {
1408 		info = "late bus ";
1409 		callback = pm_late_early_op(dev->bus->pm, state);
1410 	}
1411 	if (callback)
1412 		goto Run;
1413 
1414 	if (dev_pm_skip_suspend(dev))
1415 		goto Skip;
1416 
1417 	if (dev->driver && dev->driver->pm) {
1418 		info = "late driver ";
1419 		callback = pm_late_early_op(dev->driver->pm, state);
1420 	}
1421 
1422 Run:
1423 	error = dpm_run_callback(callback, dev, state, info);
1424 	if (error) {
1425 		async_error = error;
1426 		goto Complete;
1427 	}
1428 	dpm_propagate_wakeup_to_parent(dev);
1429 
1430 Skip:
1431 	dev->power.is_late_suspended = true;
1432 
1433 Complete:
1434 	TRACE_SUSPEND(error);
1435 	complete_all(&dev->power.completion);
1436 	return error;
1437 }
1438 
1439 static void async_suspend_late(void *data, async_cookie_t cookie)
1440 {
1441 	struct device *dev = (struct device *)data;
1442 	int error;
1443 
1444 	error = __device_suspend_late(dev, pm_transition, true);
1445 	if (error) {
1446 		dpm_save_failed_dev(dev_name(dev));
1447 		pm_dev_err(dev, pm_transition, " async", error);
1448 	}
1449 	put_device(dev);
1450 }
1451 
1452 static int device_suspend_late(struct device *dev)
1453 {
1454 	if (dpm_async_fn(dev, async_suspend_late))
1455 		return 0;
1456 
1457 	return __device_suspend_late(dev, pm_transition, false);
1458 }
1459 
1460 /**
1461  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1462  * @state: PM transition of the system being carried out.
1463  */
1464 int dpm_suspend_late(pm_message_t state)
1465 {
1466 	ktime_t starttime = ktime_get();
1467 	int error = 0;
1468 
1469 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1470 	mutex_lock(&dpm_list_mtx);
1471 	pm_transition = state;
1472 	async_error = 0;
1473 
1474 	while (!list_empty(&dpm_suspended_list)) {
1475 		struct device *dev = to_device(dpm_suspended_list.prev);
1476 
1477 		get_device(dev);
1478 		mutex_unlock(&dpm_list_mtx);
1479 
1480 		error = device_suspend_late(dev);
1481 
1482 		mutex_lock(&dpm_list_mtx);
1483 		if (!list_empty(&dev->power.entry))
1484 			list_move(&dev->power.entry, &dpm_late_early_list);
1485 
1486 		if (error) {
1487 			pm_dev_err(dev, state, " late", error);
1488 			dpm_save_failed_dev(dev_name(dev));
1489 			put_device(dev);
1490 			break;
1491 		}
1492 		put_device(dev);
1493 
1494 		if (async_error)
1495 			break;
1496 	}
1497 	mutex_unlock(&dpm_list_mtx);
1498 	async_synchronize_full();
1499 	if (!error)
1500 		error = async_error;
1501 	if (error) {
1502 		suspend_stats.failed_suspend_late++;
1503 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1504 		dpm_resume_early(resume_event(state));
1505 	}
1506 	dpm_show_time(starttime, state, error, "late");
1507 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1508 	return error;
1509 }
1510 
1511 /**
1512  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1513  * @state: PM transition of the system being carried out.
1514  */
1515 int dpm_suspend_end(pm_message_t state)
1516 {
1517 	ktime_t starttime = ktime_get();
1518 	int error;
1519 
1520 	error = dpm_suspend_late(state);
1521 	if (error)
1522 		goto out;
1523 
1524 	error = dpm_suspend_noirq(state);
1525 	if (error)
1526 		dpm_resume_early(resume_event(state));
1527 
1528 out:
1529 	dpm_show_time(starttime, state, error, "end");
1530 	return error;
1531 }
1532 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1533 
1534 /**
1535  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1536  * @dev: Device to suspend.
1537  * @state: PM transition of the system being carried out.
1538  * @cb: Suspend callback to execute.
1539  * @info: string description of caller.
1540  */
1541 static int legacy_suspend(struct device *dev, pm_message_t state,
1542 			  int (*cb)(struct device *dev, pm_message_t state),
1543 			  const char *info)
1544 {
1545 	int error;
1546 	ktime_t calltime;
1547 
1548 	calltime = initcall_debug_start(dev, cb);
1549 
1550 	trace_device_pm_callback_start(dev, info, state.event);
1551 	error = cb(dev, state);
1552 	trace_device_pm_callback_end(dev, error);
1553 	suspend_report_result(cb, error);
1554 
1555 	initcall_debug_report(dev, calltime, cb, error);
1556 
1557 	return error;
1558 }
1559 
/*
 * dpm_clear_superiors_direct_complete - Clear direct_complete upwards.
 * @dev: Device whose parent and link suppliers are to be handled.
 *
 * Clear the direct_complete flag of @dev's parent and of all of its supplier
 * devices, so that they go through full suspend/resume processing.
 */
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	/* Walk the supplier links under the device-links read lock. */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
1581 
1582 /**
1583  * __device_suspend - Execute "suspend" callbacks for given device.
1584  * @dev: Device to handle.
1585  * @state: PM transition of the system being carried out.
1586  * @async: If true, the device is being suspended asynchronously.
1587  */
1588 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1589 {
1590 	pm_callback_t callback = NULL;
1591 	const char *info = NULL;
1592 	int error = 0;
1593 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1594 
1595 	TRACE_DEVICE(dev);
1596 	TRACE_SUSPEND(0);
1597 
1598 	dpm_wait_for_subordinate(dev, async);
1599 
1600 	if (async_error) {
1601 		dev->power.direct_complete = false;
1602 		goto Complete;
1603 	}
1604 
1605 	/*
1606 	 * Wait for possible runtime PM transitions of the device in progress
1607 	 * to complete and if there's a runtime resume request pending for it,
1608 	 * resume it before proceeding with invoking the system-wide suspend
1609 	 * callbacks for it.
1610 	 *
1611 	 * If the system-wide suspend callbacks below change the configuration
1612 	 * of the device, they must disable runtime PM for it or otherwise
1613 	 * ensure that its runtime-resume callbacks will not be confused by that
1614 	 * change in case they are invoked going forward.
1615 	 */
1616 	pm_runtime_barrier(dev);
1617 
1618 	if (pm_wakeup_pending()) {
1619 		dev->power.direct_complete = false;
1620 		async_error = -EBUSY;
1621 		goto Complete;
1622 	}
1623 
1624 	if (dev->power.syscore)
1625 		goto Complete;
1626 
1627 	/* Avoid direct_complete to let wakeup_path propagate. */
1628 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1629 		dev->power.direct_complete = false;
1630 
1631 	if (dev->power.direct_complete) {
1632 		if (pm_runtime_status_suspended(dev)) {
1633 			pm_runtime_disable(dev);
1634 			if (pm_runtime_status_suspended(dev)) {
1635 				pm_dev_dbg(dev, state, "direct-complete ");
1636 				goto Complete;
1637 			}
1638 
1639 			pm_runtime_enable(dev);
1640 		}
1641 		dev->power.direct_complete = false;
1642 	}
1643 
1644 	dev->power.may_skip_resume = true;
1645 	dev->power.must_resume = false;
1646 
1647 	dpm_watchdog_set(&wd, dev);
1648 	device_lock(dev);
1649 
1650 	if (dev->pm_domain) {
1651 		info = "power domain ";
1652 		callback = pm_op(&dev->pm_domain->ops, state);
1653 		goto Run;
1654 	}
1655 
1656 	if (dev->type && dev->type->pm) {
1657 		info = "type ";
1658 		callback = pm_op(dev->type->pm, state);
1659 		goto Run;
1660 	}
1661 
1662 	if (dev->class && dev->class->pm) {
1663 		info = "class ";
1664 		callback = pm_op(dev->class->pm, state);
1665 		goto Run;
1666 	}
1667 
1668 	if (dev->bus) {
1669 		if (dev->bus->pm) {
1670 			info = "bus ";
1671 			callback = pm_op(dev->bus->pm, state);
1672 		} else if (dev->bus->suspend) {
1673 			pm_dev_dbg(dev, state, "legacy bus ");
1674 			error = legacy_suspend(dev, state, dev->bus->suspend,
1675 						"legacy bus ");
1676 			goto End;
1677 		}
1678 	}
1679 
1680  Run:
1681 	if (!callback && dev->driver && dev->driver->pm) {
1682 		info = "driver ";
1683 		callback = pm_op(dev->driver->pm, state);
1684 	}
1685 
1686 	error = dpm_run_callback(callback, dev, state, info);
1687 
1688  End:
1689 	if (!error) {
1690 		dev->power.is_suspended = true;
1691 		if (device_may_wakeup(dev))
1692 			dev->power.wakeup_path = true;
1693 
1694 		dpm_propagate_wakeup_to_parent(dev);
1695 		dpm_clear_superiors_direct_complete(dev);
1696 	}
1697 
1698 	device_unlock(dev);
1699 	dpm_watchdog_clear(&wd);
1700 
1701  Complete:
1702 	if (error)
1703 		async_error = error;
1704 
1705 	complete_all(&dev->power.completion);
1706 	TRACE_SUSPEND(error);
1707 	return error;
1708 }
1709 
1710 static void async_suspend(void *data, async_cookie_t cookie)
1711 {
1712 	struct device *dev = (struct device *)data;
1713 	int error;
1714 
1715 	error = __device_suspend(dev, pm_transition, true);
1716 	if (error) {
1717 		dpm_save_failed_dev(dev_name(dev));
1718 		pm_dev_err(dev, pm_transition, " async", error);
1719 	}
1720 
1721 	put_device(dev);
1722 }
1723 
1724 static int device_suspend(struct device *dev)
1725 {
1726 	if (dpm_async_fn(dev, async_suspend))
1727 		return 0;
1728 
1729 	return __device_suspend(dev, pm_transition, false);
1730 }
1731 
1732 /**
1733  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1734  * @state: PM transition of the system being carried out.
1735  */
1736 int dpm_suspend(pm_message_t state)
1737 {
1738 	ktime_t starttime = ktime_get();
1739 	int error = 0;
1740 
1741 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1742 	might_sleep();
1743 
1744 	devfreq_suspend();
1745 	cpufreq_suspend();
1746 
1747 	mutex_lock(&dpm_list_mtx);
1748 	pm_transition = state;
1749 	async_error = 0;
1750 	while (!list_empty(&dpm_prepared_list)) {
1751 		struct device *dev = to_device(dpm_prepared_list.prev);
1752 
1753 		get_device(dev);
1754 		mutex_unlock(&dpm_list_mtx);
1755 
1756 		error = device_suspend(dev);
1757 
1758 		mutex_lock(&dpm_list_mtx);
1759 		if (error) {
1760 			pm_dev_err(dev, state, "", error);
1761 			dpm_save_failed_dev(dev_name(dev));
1762 			put_device(dev);
1763 			break;
1764 		}
1765 		if (!list_empty(&dev->power.entry))
1766 			list_move(&dev->power.entry, &dpm_suspended_list);
1767 		put_device(dev);
1768 		if (async_error)
1769 			break;
1770 	}
1771 	mutex_unlock(&dpm_list_mtx);
1772 	async_synchronize_full();
1773 	if (!error)
1774 		error = async_error;
1775 	if (error) {
1776 		suspend_stats.failed_suspend++;
1777 		dpm_save_failed_step(SUSPEND_SUSPEND);
1778 	}
1779 	dpm_show_time(starttime, state, error, NULL);
1780 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1781 	return error;
1782 }
1783 
1784 /**
1785  * device_prepare - Prepare a device for system power transition.
1786  * @dev: Device to handle.
1787  * @state: PM transition of the system being carried out.
1788  *
1789  * Execute the ->prepare() callback(s) for given device.  No new children of the
1790  * device may be registered after this function has returned.
1791  */
1792 static int device_prepare(struct device *dev, pm_message_t state)
1793 {
1794 	int (*callback)(struct device *) = NULL;
1795 	int ret = 0;
1796 
1797 	if (dev->power.syscore)
1798 		return 0;
1799 
1800 	/*
1801 	 * If a device's parent goes into runtime suspend at the wrong time,
1802 	 * it won't be possible to resume the device.  To prevent this we
1803 	 * block runtime suspend here, during the prepare phase, and allow
1804 	 * it again during the complete phase.
1805 	 */
1806 	pm_runtime_get_noresume(dev);
1807 
1808 	device_lock(dev);
1809 
1810 	dev->power.wakeup_path = false;
1811 
1812 	if (dev->power.no_pm_callbacks)
1813 		goto unlock;
1814 
1815 	if (dev->pm_domain)
1816 		callback = dev->pm_domain->ops.prepare;
1817 	else if (dev->type && dev->type->pm)
1818 		callback = dev->type->pm->prepare;
1819 	else if (dev->class && dev->class->pm)
1820 		callback = dev->class->pm->prepare;
1821 	else if (dev->bus && dev->bus->pm)
1822 		callback = dev->bus->pm->prepare;
1823 
1824 	if (!callback && dev->driver && dev->driver->pm)
1825 		callback = dev->driver->pm->prepare;
1826 
1827 	if (callback)
1828 		ret = callback(dev);
1829 
1830 unlock:
1831 	device_unlock(dev);
1832 
1833 	if (ret < 0) {
1834 		suspend_report_result(callback, ret);
1835 		pm_runtime_put(dev);
1836 		return ret;
1837 	}
1838 	/*
1839 	 * A positive return value from ->prepare() means "this device appears
1840 	 * to be runtime-suspended and its state is fine, so if it really is
1841 	 * runtime-suspended, you can leave it in that state provided that you
1842 	 * will do the same thing with all of its descendants".  This only
1843 	 * applies to suspend transitions, however.
1844 	 */
1845 	spin_lock_irq(&dev->power.lock);
1846 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1847 		(ret > 0 || dev->power.no_pm_callbacks) &&
1848 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1849 	spin_unlock_irq(&dev->power.lock);
1850 	return 0;
1851 }
1852 
1853 /**
1854  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1855  * @state: PM transition of the system being carried out.
1856  *
1857  * Execute the ->prepare() callback(s) for all devices.
1858  */
1859 int dpm_prepare(pm_message_t state)
1860 {
1861 	int error = 0;
1862 
1863 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1864 	might_sleep();
1865 
1866 	/*
1867 	 * Give a chance for the known devices to complete their probes, before
1868 	 * disable probing of devices. This sync point is important at least
1869 	 * at boot time + hibernation restore.
1870 	 */
1871 	wait_for_device_probe();
1872 	/*
1873 	 * It is unsafe if probing of devices will happen during suspend or
1874 	 * hibernation and system behavior will be unpredictable in this case.
1875 	 * So, let's prohibit device's probing here and defer their probes
1876 	 * instead. The normal behavior will be restored in dpm_complete().
1877 	 */
1878 	device_block_probing();
1879 
1880 	mutex_lock(&dpm_list_mtx);
1881 	while (!list_empty(&dpm_list)) {
1882 		struct device *dev = to_device(dpm_list.next);
1883 
1884 		get_device(dev);
1885 		mutex_unlock(&dpm_list_mtx);
1886 
1887 		trace_device_pm_callback_start(dev, "", state.event);
1888 		error = device_prepare(dev, state);
1889 		trace_device_pm_callback_end(dev, error);
1890 
1891 		mutex_lock(&dpm_list_mtx);
1892 		if (error) {
1893 			if (error == -EAGAIN) {
1894 				put_device(dev);
1895 				error = 0;
1896 				continue;
1897 			}
1898 			dev_info(dev, "not prepared for power transition: code %d\n",
1899 				 error);
1900 			put_device(dev);
1901 			break;
1902 		}
1903 		dev->power.is_prepared = true;
1904 		if (!list_empty(&dev->power.entry))
1905 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1906 		put_device(dev);
1907 	}
1908 	mutex_unlock(&dpm_list_mtx);
1909 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1910 	return error;
1911 }
1912 
1913 /**
1914  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1915  * @state: PM transition of the system being carried out.
1916  *
1917  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1918  * callbacks for them.
1919  */
1920 int dpm_suspend_start(pm_message_t state)
1921 {
1922 	ktime_t starttime = ktime_get();
1923 	int error;
1924 
1925 	error = dpm_prepare(state);
1926 	if (error) {
1927 		suspend_stats.failed_prepare++;
1928 		dpm_save_failed_step(SUSPEND_PREPARE);
1929 	} else
1930 		error = dpm_suspend(state);
1931 	dpm_show_time(starttime, state, error, "start");
1932 	return error;
1933 }
1934 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1935 
/* Log a non-zero result reported via the suspend_report_result() macro. */
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1942 
1943 /**
1944  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1945  * @subordinate: Device that needs to wait for @dev.
1946  * @dev: Device to wait for.
1947  */
1948 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1949 {
1950 	dpm_wait(dev, subordinate->power.async_suspend);
1951 	return async_error;
1952 }
1953 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1954 
1955 /**
1956  * dpm_for_each_dev - device iterator.
1957  * @data: data for the callback.
1958  * @fn: function to be called for each device.
1959  *
1960  * Iterate over devices in dpm_list, and call @fn for each device,
1961  * passing it @data.
1962  */
1963 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1964 {
1965 	struct device *dev;
1966 
1967 	if (!fn)
1968 		return;
1969 
1970 	device_pm_lock();
1971 	list_for_each_entry(dev, &dpm_list, power.entry)
1972 		fn(dev, data);
1973 	device_pm_unlock();
1974 }
1975 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1976 
1977 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1978 {
1979 	if (!ops)
1980 		return true;
1981 
1982 	return !ops->prepare &&
1983 	       !ops->suspend &&
1984 	       !ops->suspend_late &&
1985 	       !ops->suspend_noirq &&
1986 	       !ops->resume_noirq &&
1987 	       !ops->resume_early &&
1988 	       !ops->resume &&
1989 	       !ops->complete;
1990 }
1991 
1992 void device_pm_check_callbacks(struct device *dev)
1993 {
1994 	spin_lock_irq(&dev->power.lock);
1995 	dev->power.no_pm_callbacks =
1996 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1997 		 !dev->bus->suspend && !dev->bus->resume)) &&
1998 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1999 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2000 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2001 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2002 		 !dev->driver->suspend && !dev->driver->resume));
2003 	spin_unlock_irq(&dev->power.lock);
2004 }
2005 
/*
 * dev_pm_skip_suspend - Whether the system suspend of @dev may be skipped.
 *
 * Return true when the driver has set DPM_FLAG_SMART_SUSPEND and the device's
 * runtime PM status is "suspended".
 */
bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
2011