xref: /openbmc/linux/drivers/base/power/main.c (revision 86db9f28)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 /*
44  * The entries in the dpm_list list are in a depth first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52 
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58 
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62 
63 static int async_error;
64 
65 static const char *pm_verb(int event)
66 {
67 	switch (event) {
68 	case PM_EVENT_SUSPEND:
69 		return "suspend";
70 	case PM_EVENT_RESUME:
71 		return "resume";
72 	case PM_EVENT_FREEZE:
73 		return "freeze";
74 	case PM_EVENT_QUIESCE:
75 		return "quiesce";
76 	case PM_EVENT_HIBERNATE:
77 		return "hibernate";
78 	case PM_EVENT_THAW:
79 		return "thaw";
80 	case PM_EVENT_RESTORE:
81 		return "restore";
82 	case PM_EVENT_RECOVER:
83 		return "recover";
84 	default:
85 		return "(unknown PM event)";
86 	}
87 }
88 
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 *
 * Called by the driver core when a device is registered; resets all
 * system-sleep bookkeeping to the "fully awake" state.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	/* Start out "completed" so dpm_wait() callers never block here. */
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
104 
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 *
 * Serializes access to dpm_list and the dpm_*_list phase lists.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
112 
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 *
 * Counterpart of device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
120 
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 *
 * May be called with a device lock held (see the header comment about
 * lock ordering vs dpm_list_mtx).
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	/* A "prepared" parent means a sleep transition is under way. */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	/* Tail insertion keeps dpm_list in parents-before-children order. */
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
142 
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
163 
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 *
 * NOTE(review): the list is modified without taking dpm_list_mtx here;
 * presumably callers hold it via device_pm_lock() - confirm at call sites.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
177 
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 *
 * NOTE(review): the list is modified without taking dpm_list_mtx here;
 * presumably callers hold it via device_pm_lock() - confirm at call sites.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
191 
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 *
 * NOTE(review): like the other move helpers, assumes the caller serializes
 * against dpm_list users - confirm at call sites.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
202 
/*
 * initcall_debug_start - Log the start of a PM callback invocation.
 *
 * Returns the timestamp to hand to initcall_debug_report(), or 0 when
 * pm_print_times is disabled (in which case nothing is logged).
 */
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}
213 
/*
 * initcall_debug_report - Log the result and duration of a PM callback.
 *
 * Companion of initcall_debug_start(); @calltime is the value it returned.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	/* ">> 10" approximates ns / 1000, i.e. microseconds. */
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}
229 
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 *
 * Blocks on @dev's power.completion, which is completed when the device
 * finishes the current phase of the transition.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
243 
/* Adapter so dpm_wait() can be used with device_for_each_child(). */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;	/* Always 0: visit every child. */
}
249 
250 static void dpm_wait_for_children(struct device *dev, bool async)
251 {
252        device_for_each_child(dev, &async, dpm_wait_fn);
253 }
254 
/* Wait for every non-dormant supplier of @dev to finish its PM transition. */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
275 
/*
 * Wait for everything @dev depends on (parent and suppliers) - used on the
 * resume path, where dependencies must come up before the device itself.
 */
static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}
281 
/* Wait for every non-dormant consumer of @dev to finish its PM transition. */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
304 
/*
 * Wait for everything that depends on @dev (children and consumers) - used
 * on the suspend path, where dependents must go down before the device.
 */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
310 
311 /**
312  * pm_op - Return the PM operation appropriate for given PM event.
313  * @ops: PM operations to choose from.
314  * @state: PM transition of the system being carried out.
315  */
316 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
317 {
318 	switch (state.event) {
319 #ifdef CONFIG_SUSPEND
320 	case PM_EVENT_SUSPEND:
321 		return ops->suspend;
322 	case PM_EVENT_RESUME:
323 		return ops->resume;
324 #endif /* CONFIG_SUSPEND */
325 #ifdef CONFIG_HIBERNATE_CALLBACKS
326 	case PM_EVENT_FREEZE:
327 	case PM_EVENT_QUIESCE:
328 		return ops->freeze;
329 	case PM_EVENT_HIBERNATE:
330 		return ops->poweroff;
331 	case PM_EVENT_THAW:
332 	case PM_EVENT_RECOVER:
333 		return ops->thaw;
334 		break;
335 	case PM_EVENT_RESTORE:
336 		return ops->restore;
337 #endif /* CONFIG_HIBERNATE_CALLBACKS */
338 	}
339 
340 	return NULL;
341 }
342 
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 *
 * Return: the "late"/"early" phase callback for @state.event, or NULL if
 * @ops does not provide one (or support for the event is not compiled in).
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
376 
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * Return: the "noirq" phase callback for @state.event, or NULL if @ops does
 * not provide one (or support for the event is not compiled in).
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
410 
/* Debug-log that @info/@state is about to be handled for @dev. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
417 
/* Log a PM callback failure for @dev with the phase name and error code. */
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}
424 
/*
 * dpm_show_time - Debug-log how long a whole PM phase took.
 * @starttime: When the phase started.
 * @state: PM transition of the system being carried out.
 * @error: Nonzero if the phase was aborted.
 * @info: Optional phase name ("noirq", "early", ...) or NULL.
 */
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Report at least 1 us so sub-microsecond phases don't show "0.000". */
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
444 
/*
 * dpm_run_callback - Invoke one device PM callback with tracing and timing.
 * @cb: Callback to run (NULL is a no-op returning 0).
 * @dev: Device the callback operates on.
 * @state: PM transition of the system being carried out.
 * @info: Phase description used in debug/trace output.
 *
 * Return: the callback's return value, or 0 if @cb is NULL.
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	/* Record a failing callback for suspend diagnostics. */
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
466 
467 #ifdef CONFIG_DPM_WATCHDOG
/* On-stack watchdog guarding a single device's suspend/resume callback. */
struct dpm_watchdog {
	struct device		*dev;	/* device being suspended/resumed */
	struct task_struct	*tsk;	/* task running the transition */
	struct timer_list	timer;	/* fires if the callback times out */
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd
476 
/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	/* Dump the stuck task's stack before panicking. */
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}
494 
/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 *
 * Must be paired with dpm_watchdog_clear() before @wd goes out of scope.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}
512 
/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 *
 * Synchronously stops the timer; safe because @wd lives on the caller's
 * stack and must not be referenced after this returns.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
524 #else
525 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
526 #define dpm_watchdog_set(x, y)
527 #define dpm_watchdog_clear(x)
528 #endif
529 
530 /*------------------------- Resume routines -------------------------*/
531 
532 /**
533  * suspend_event - Return a "suspend" message for given "resume" one.
534  * @resume_msg: PM message representing a system-wide resume transition.
535  */
536 static pm_message_t suspend_event(pm_message_t resume_msg)
537 {
538 	switch (resume_msg.event) {
539 	case PM_EVENT_RESUME:
540 		return PMSG_SUSPEND;
541 	case PM_EVENT_THAW:
542 	case PM_EVENT_RESTORE:
543 		return PMSG_FREEZE;
544 	case PM_EVENT_RECOVER:
545 		return PMSG_HIBERNATE;
546 	}
547 	return PMSG_ON;
548 }
549 
/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Checks whether or not the device may be left in suspend after a system-wide
 * transition to the working state.
 *
 * Return: true unless the device must resume or the transition is a
 * hibernation "restore" (which always resumes everything).
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
}
561 
562 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
563 						pm_message_t state,
564 						const char **info_p)
565 {
566 	pm_callback_t callback;
567 	const char *info;
568 
569 	if (dev->pm_domain) {
570 		info = "noirq power domain ";
571 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
572 	} else if (dev->type && dev->type->pm) {
573 		info = "noirq type ";
574 		callback = pm_noirq_op(dev->type->pm, state);
575 	} else if (dev->class && dev->class->pm) {
576 		info = "noirq class ";
577 		callback = pm_noirq_op(dev->class->pm, state);
578 	} else if (dev->bus && dev->bus->pm) {
579 		info = "noirq bus ";
580 		callback = pm_noirq_op(dev->bus->pm, state);
581 	} else {
582 		return NULL;
583 	}
584 
585 	if (info_p)
586 		*info_p = info;
587 
588 	return callback;
589 }
590 
591 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
592 						 pm_message_t state,
593 						 const char **info_p);
594 
595 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
596 						pm_message_t state,
597 						const char **info_p);
598 
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Syscore and direct-complete devices are not touched in this phase. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	/* Parent and suppliers must finish this phase before @dev starts. */
	dpm_wait_for_superior(dev, async);

	skip_resume = dev_pm_may_skip_resume(dev);

	/* If a subsystem provides a callback, it also sets "info". */
	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		pm_message_t suspend_msg = suspend_event(state);

		/*
		 * If "freeze" callbacks have been skipped during a transition
		 * related to hibernation, the subsequent "thaw" callbacks must
		 * be skipped too or bad things may happen.  Otherwise, resume
		 * callbacks are going to be run for the device, so its runtime
		 * PM status must be changed to reflect the new state after the
		 * transition under way.
		 */
		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
			if (state.event == PM_EVENT_THAW) {
				skip_resume = true;
				goto Skip;
			} else {
				pm_runtime_set_active(dev);
			}
		}
	}

	/* Fall back to the driver's own noirq callback. */
	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	/* "info" is only read when callback != NULL (see dpm_run_callback). */
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

	if (skip_resume) {
		/* Make the next phases of resume skip the device. */
		dev->power.is_late_suspended = false;
		dev->power.is_suspended = false;
		/*
		 * The device is going to be left in suspend, but it might not
		 * have been in runtime suspend before the system suspended, so
		 * its runtime PM status needs to be updated to avoid confusing
		 * the runtime PM framework when runtime PM is enabled for the
		 * device again.
		 */
		pm_runtime_set_suspended(dev);
	}

Out:
	/* Unblock dependents waiting in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
687 
688 static bool is_async(struct device *dev)
689 {
690 	return dev->power.async_suspend && pm_async_enabled
691 		&& !pm_trace_is_enabled();
692 }
693 
/*
 * dpm_async_fn - Schedule @func to run asynchronously for @dev if allowed.
 *
 * Return: true if @func was scheduled (a device reference is held until the
 * async function drops it); false if the caller must handle @dev
 * synchronously.
 */
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	/* Re-arm the completion even in the sync case, for dpm_wait(). */
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(func, dev);
		return true;
	}

	return false;
}
706 
707 static void async_resume_noirq(void *data, async_cookie_t cookie)
708 {
709 	struct device *dev = (struct device *)data;
710 	int error;
711 
712 	error = device_resume_noirq(dev, pm_transition, true);
713 	if (error)
714 		pm_dev_err(dev, pm_transition, " async", error);
715 
716 	put_device(dev);
717 }
718 
/* Run the "noirq" resume phase for every device in dpm_noirq_list. */
static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the async threads upfront, so their start is not
	 * delayed by devices that are resumed synchronously below.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		/* Pin the device across the unlocked callback section. */
		get_device(dev);
		/* Hand the device to the next ("early") resume phase. */
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all the async resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
762 
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	/* Device interrupts may be re-enabled only after the noirq phase. */
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();

	cpuidle_resume();
}
779 
/*
 * Pick the subsystem-level "early" resume callback for @dev, if any.
 * Precedence: power domain, then type, then class, then bus.  When no
 * subsystem supplies PM operations, *info_p is left untouched and NULL is
 * returned.
 */
static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}
808 
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Syscore and direct-complete devices are not touched in this phase. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	callback = dpm_subsys_resume_early_cb(dev, state, &info);

	/* Fall back to the driver's own callback if no subsystem has one. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	/*
	 * Re-enable runtime PM; presumably balances a pm_runtime_disable()
	 * done on the late-suspend path - confirm in the suspend code.
	 */
	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
851 
852 static void async_resume_early(void *data, async_cookie_t cookie)
853 {
854 	struct device *dev = (struct device *)data;
855 	int error;
856 
857 	error = device_resume_early(dev, pm_transition, true);
858 	if (error)
859 		pm_dev_err(dev, pm_transition, " async", error);
860 
861 	put_device(dev);
862 }
863 
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the async threads upfront, so their start is not
	 * delayed by devices that are resumed synchronously below.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		/* Pin the device across the unlocked callback section. */
		get_device(dev);
		/* Hand the device to the next (full "resume") phase. */
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all the async resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
910 
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 *
 * Convenience wrapper running the first two resume phases back to back.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
921 
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	/* Panic via the watchdog if the callback never returns. */
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	/* Callback precedence: power domain, type, class, bus, then driver. */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			/* Legacy bus ->resume skips the driver fallback. */
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	/* Unblock dependents waiting in dpm_wait() on this device. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
1010 
1011 static void async_resume(void *data, async_cookie_t cookie)
1012 {
1013 	struct device *dev = (struct device *)data;
1014 	int error;
1015 
1016 	error = device_resume(dev, pm_transition, true);
1017 	if (error)
1018 		pm_dev_err(dev, pm_transition, " async", error);
1019 	put_device(dev);
1020 }
1021 
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Kick off async resumes upfront; sync devices are handled below. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		/* Pin the device across the unlocked callback section. */
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		/* The device may have been removed while unlocked. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all the async resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
1074 
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	/* Callback precedence: power domain, type, class, bus, then driver. */
	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	/*
	 * Drops a runtime-PM reference; presumably pairs with a get taken
	 * during the "prepare" phase - confirm in dpm_prepare().
	 */
	pm_runtime_put(dev);
}
1118 
1119 /**
1120  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1121  * @state: PM transition of the system being carried out.
1122  *
1123  * Execute the ->complete() callbacks for all devices whose PM status is not
1124  * DPM_ON (this allows new devices to be registered).
1125  */
1126 void dpm_complete(pm_message_t state)
1127 {
1128 	struct list_head list;
1129 
1130 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1131 	might_sleep();
1132 
1133 	INIT_LIST_HEAD(&list);
1134 	mutex_lock(&dpm_list_mtx);
1135 	while (!list_empty(&dpm_prepared_list)) {
1136 		struct device *dev = to_device(dpm_prepared_list.prev);
1137 
1138 		get_device(dev);
1139 		dev->power.is_prepared = false;
1140 		list_move(&dev->power.entry, &list);
1141 		mutex_unlock(&dpm_list_mtx);
1142 
1143 		trace_device_pm_callback_start(dev, "", state.event);
1144 		device_complete(dev, state);
1145 		trace_device_pm_callback_end(dev, 0);
1146 
1147 		mutex_lock(&dpm_list_mtx);
1148 		put_device(dev);
1149 	}
1150 	list_splice(&list, &dpm_list);
1151 	mutex_unlock(&dpm_list_mtx);
1152 
1153 	/* Allow device probing and trigger re-probing of deferred devices */
1154 	device_unblock_probing();
1155 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1156 }
1157 
1158 /**
1159  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1160  * @state: PM transition of the system being carried out.
1161  *
1162  * Execute "resume" callbacks for all devices and complete the PM transition of
1163  * the system.
1164  */
1165 void dpm_resume_end(pm_message_t state)
1166 {
1167 	dpm_resume(state);
1168 	dpm_complete(state);
1169 }
1170 EXPORT_SYMBOL_GPL(dpm_resume_end);
1171 
1172 
1173 /*------------------------- Suspend routines -------------------------*/
1174 
1175 /**
1176  * resume_event - Return a "resume" message for given "suspend" sleep state.
1177  * @sleep_state: PM message representing a sleep state.
1178  *
1179  * Return a PM message representing the resume event corresponding to given
1180  * sleep state.
1181  */
1182 static pm_message_t resume_event(pm_message_t sleep_state)
1183 {
1184 	switch (sleep_state.event) {
1185 	case PM_EVENT_SUSPEND:
1186 		return PMSG_RESUME;
1187 	case PM_EVENT_FREEZE:
1188 	case PM_EVENT_QUIESCE:
1189 		return PMSG_RECOVER;
1190 	case PM_EVENT_HIBERNATE:
1191 		return PMSG_RESTORE;
1192 	}
1193 	return PMSG_ON;
1194 }
1195 
/*
 * Mark the parent of @dev and all of its suppliers as having to be resumed,
 * so none of them is left suspended while @dev is brought back up.
 */
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}
1211 
1212 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1213 						 pm_message_t state,
1214 						 const char **info_p)
1215 {
1216 	pm_callback_t callback;
1217 	const char *info;
1218 
1219 	if (dev->pm_domain) {
1220 		info = "noirq power domain ";
1221 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1222 	} else if (dev->type && dev->type->pm) {
1223 		info = "noirq type ";
1224 		callback = pm_noirq_op(dev->type->pm, state);
1225 	} else if (dev->class && dev->class->pm) {
1226 		info = "noirq class ";
1227 		callback = pm_noirq_op(dev->class->pm, state);
1228 	} else if (dev->bus && dev->bus->pm) {
1229 		info = "noirq bus ";
1230 		callback = pm_noirq_op(dev->bus->pm, state);
1231 	} else {
1232 		return NULL;
1233 	}
1234 
1235 	if (info_p)
1236 		*info_p = info;
1237 
1238 	return callback;
1239 }
1240 
/*
 * device_must_resume - Check whether resume callbacks have to be run for a
 * device that was left suspended during the "noirq" suspend phase.
 * @dev: Device to check.
 * @state: PM transition of the system being carried out.
 * @no_subsys_suspend_noirq: Set if the device had no subsystem-level "late"
 *	suspend callback.
 */
static bool device_must_resume(struct device *dev, pm_message_t state,
			       bool no_subsys_suspend_noirq)
{
	pm_message_t resume_msg = resume_event(state);

	/*
	 * If all of the device driver's "noirq", "late" and "early" callbacks
	 * are invoked directly by the core, the decision to allow the device to
	 * stay in suspend can be based on its current runtime PM status and its
	 * wakeup settings.
	 */
	if (no_subsys_suspend_noirq &&
	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
		return !pm_runtime_status_suspended(dev) &&
			(resume_msg.event != PM_EVENT_RESUME ||
			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));

	/*
	 * The only safe strategy here is to require that if the device may not
	 * be left in suspend, resume callbacks must be invoked for it.
	 */
	return !dev->power.may_skip_resume;
}
1266 
1267 /**
1268  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1269  * @dev: Device to handle.
1270  * @state: PM transition of the system being carried out.
1271  * @async: If true, the device is being suspended asynchronously.
1272  *
1273  * The driver of @dev will not receive interrupts while this function is being
1274  * executed.
1275  */
1276 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1277 {
1278 	pm_callback_t callback;
1279 	const char *info;
1280 	bool no_subsys_cb = false;
1281 	int error = 0;
1282 
1283 	TRACE_DEVICE(dev);
1284 	TRACE_SUSPEND(0);
1285 
1286 	dpm_wait_for_subordinate(dev, async);
1287 
1288 	if (async_error)
1289 		goto Complete;
1290 
1291 	if (dev->power.syscore || dev->power.direct_complete)
1292 		goto Complete;
1293 
1294 	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1295 	if (callback)
1296 		goto Run;
1297 
1298 	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1299 
1300 	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1301 		goto Skip;
1302 
1303 	if (dev->driver && dev->driver->pm) {
1304 		info = "noirq driver ";
1305 		callback = pm_noirq_op(dev->driver->pm, state);
1306 	}
1307 
1308 Run:
1309 	error = dpm_run_callback(callback, dev, state, info);
1310 	if (error) {
1311 		async_error = error;
1312 		goto Complete;
1313 	}
1314 
1315 Skip:
1316 	dev->power.is_noirq_suspended = true;
1317 
1318 	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1319 		dev->power.must_resume = dev->power.must_resume ||
1320 				atomic_read(&dev->power.usage_count) > 1 ||
1321 				device_must_resume(dev, state, no_subsys_cb);
1322 	} else {
1323 		dev->power.must_resume = true;
1324 	}
1325 
1326 	if (dev->power.must_resume)
1327 		dpm_superior_set_must_resume(dev);
1328 
1329 Complete:
1330 	complete_all(&dev->power.completion);
1331 	TRACE_SUSPEND(error);
1332 	return error;
1333 }
1334 
1335 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1336 {
1337 	struct device *dev = (struct device *)data;
1338 	int error;
1339 
1340 	error = __device_suspend_noirq(dev, pm_transition, true);
1341 	if (error) {
1342 		dpm_save_failed_dev(dev_name(dev));
1343 		pm_dev_err(dev, pm_transition, " async", error);
1344 	}
1345 
1346 	put_device(dev);
1347 }
1348 
1349 static int device_suspend_noirq(struct device *dev)
1350 {
1351 	if (dpm_async_fn(dev, async_suspend_noirq))
1352 		return 0;
1353 
1354 	return __device_suspend_noirq(dev, pm_transition, false);
1355 }
1356 
/*
 * Run the "noirq" suspend phase for every device on dpm_late_early_list,
 * moving each successfully handled device to dpm_noirq_list.
 */
static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		/* Drop the list lock while the callback runs. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
1402 
1403 /**
1404  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1405  * @state: PM transition of the system being carried out.
1406  *
1407  * Prevent device drivers' interrupt handlers from being called and invoke
1408  * "noirq" suspend callbacks for all non-sysdev devices.
1409  */
1410 int dpm_suspend_noirq(pm_message_t state)
1411 {
1412 	int ret;
1413 
1414 	cpuidle_pause();
1415 
1416 	device_wakeup_arm_wake_irqs();
1417 	suspend_device_irqs();
1418 
1419 	ret = dpm_noirq_suspend_devices(state);
1420 	if (ret)
1421 		dpm_resume_noirq(resume_event(state));
1422 
1423 	return ret;
1424 }
1425 
1426 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1427 {
1428 	struct device *parent = dev->parent;
1429 
1430 	if (!parent)
1431 		return;
1432 
1433 	spin_lock_irq(&parent->power.lock);
1434 
1435 	if (dev->power.wakeup_path && !parent->power.ignore_children)
1436 		parent->power.wakeup_path = true;
1437 
1438 	spin_unlock_irq(&parent->power.lock);
1439 }
1440 
1441 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1442 						pm_message_t state,
1443 						const char **info_p)
1444 {
1445 	pm_callback_t callback;
1446 	const char *info;
1447 
1448 	if (dev->pm_domain) {
1449 		info = "late power domain ";
1450 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1451 	} else if (dev->type && dev->type->pm) {
1452 		info = "late type ";
1453 		callback = pm_late_early_op(dev->type->pm, state);
1454 	} else if (dev->class && dev->class->pm) {
1455 		info = "late class ";
1456 		callback = pm_late_early_op(dev->class->pm, state);
1457 	} else if (dev->bus && dev->bus->pm) {
1458 		info = "late bus ";
1459 		callback = pm_late_early_op(dev->bus->pm, state);
1460 	} else {
1461 		return NULL;
1462 	}
1463 
1464 	if (info_p)
1465 		*info_p = info;
1466 
1467 	return callback;
1468 }
1469 
1470 /**
1471  * __device_suspend_late - Execute a "late suspend" callback for given device.
1472  * @dev: Device to handle.
1473  * @state: PM transition of the system being carried out.
1474  * @async: If true, the device is being suspended asynchronously.
1475  *
1476  * Runtime PM is disabled for @dev while this function is being executed.
1477  */
1478 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1479 {
1480 	pm_callback_t callback;
1481 	const char *info;
1482 	int error = 0;
1483 
1484 	TRACE_DEVICE(dev);
1485 	TRACE_SUSPEND(0);
1486 
1487 	__pm_runtime_disable(dev, false);
1488 
1489 	dpm_wait_for_subordinate(dev, async);
1490 
1491 	if (async_error)
1492 		goto Complete;
1493 
1494 	if (pm_wakeup_pending()) {
1495 		async_error = -EBUSY;
1496 		goto Complete;
1497 	}
1498 
1499 	if (dev->power.syscore || dev->power.direct_complete)
1500 		goto Complete;
1501 
1502 	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1503 	if (callback)
1504 		goto Run;
1505 
1506 	if (dev_pm_smart_suspend_and_suspended(dev) &&
1507 	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1508 		goto Skip;
1509 
1510 	if (dev->driver && dev->driver->pm) {
1511 		info = "late driver ";
1512 		callback = pm_late_early_op(dev->driver->pm, state);
1513 	}
1514 
1515 Run:
1516 	error = dpm_run_callback(callback, dev, state, info);
1517 	if (error) {
1518 		async_error = error;
1519 		goto Complete;
1520 	}
1521 	dpm_propagate_wakeup_to_parent(dev);
1522 
1523 Skip:
1524 	dev->power.is_late_suspended = true;
1525 
1526 Complete:
1527 	TRACE_SUSPEND(error);
1528 	complete_all(&dev->power.completion);
1529 	return error;
1530 }
1531 
1532 static void async_suspend_late(void *data, async_cookie_t cookie)
1533 {
1534 	struct device *dev = (struct device *)data;
1535 	int error;
1536 
1537 	error = __device_suspend_late(dev, pm_transition, true);
1538 	if (error) {
1539 		dpm_save_failed_dev(dev_name(dev));
1540 		pm_dev_err(dev, pm_transition, " async", error);
1541 	}
1542 	put_device(dev);
1543 }
1544 
1545 static int device_suspend_late(struct device *dev)
1546 {
1547 	if (dpm_async_fn(dev, async_suspend_late))
1548 		return 0;
1549 
1550 	return __device_suspend_late(dev, pm_transition, false);
1551 }
1552 
1553 /**
1554  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1555  * @state: PM transition of the system being carried out.
1556  */
1557 int dpm_suspend_late(pm_message_t state)
1558 {
1559 	ktime_t starttime = ktime_get();
1560 	int error = 0;
1561 
1562 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1563 	mutex_lock(&dpm_list_mtx);
1564 	pm_transition = state;
1565 	async_error = 0;
1566 
1567 	while (!list_empty(&dpm_suspended_list)) {
1568 		struct device *dev = to_device(dpm_suspended_list.prev);
1569 
1570 		get_device(dev);
1571 		mutex_unlock(&dpm_list_mtx);
1572 
1573 		error = device_suspend_late(dev);
1574 
1575 		mutex_lock(&dpm_list_mtx);
1576 		if (!list_empty(&dev->power.entry))
1577 			list_move(&dev->power.entry, &dpm_late_early_list);
1578 
1579 		if (error) {
1580 			pm_dev_err(dev, state, " late", error);
1581 			dpm_save_failed_dev(dev_name(dev));
1582 			put_device(dev);
1583 			break;
1584 		}
1585 		put_device(dev);
1586 
1587 		if (async_error)
1588 			break;
1589 	}
1590 	mutex_unlock(&dpm_list_mtx);
1591 	async_synchronize_full();
1592 	if (!error)
1593 		error = async_error;
1594 	if (error) {
1595 		suspend_stats.failed_suspend_late++;
1596 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1597 		dpm_resume_early(resume_event(state));
1598 	}
1599 	dpm_show_time(starttime, state, error, "late");
1600 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1601 	return error;
1602 }
1603 
1604 /**
1605  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1606  * @state: PM transition of the system being carried out.
1607  */
1608 int dpm_suspend_end(pm_message_t state)
1609 {
1610 	ktime_t starttime = ktime_get();
1611 	int error;
1612 
1613 	error = dpm_suspend_late(state);
1614 	if (error)
1615 		goto out;
1616 
1617 	error = dpm_suspend_noirq(state);
1618 	if (error)
1619 		dpm_resume_early(resume_event(state));
1620 
1621 out:
1622 	dpm_show_time(starttime, state, error, "end");
1623 	return error;
1624 }
1625 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1626 
1627 /**
1628  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1629  * @dev: Device to suspend.
1630  * @state: PM transition of the system being carried out.
1631  * @cb: Suspend callback to execute.
1632  * @info: string description of caller.
1633  */
1634 static int legacy_suspend(struct device *dev, pm_message_t state,
1635 			  int (*cb)(struct device *dev, pm_message_t state),
1636 			  const char *info)
1637 {
1638 	int error;
1639 	ktime_t calltime;
1640 
1641 	calltime = initcall_debug_start(dev, cb);
1642 
1643 	trace_device_pm_callback_start(dev, info, state.event);
1644 	error = cb(dev, state);
1645 	trace_device_pm_callback_end(dev, error);
1646 	suspend_report_result(cb, error);
1647 
1648 	initcall_debug_report(dev, calltime, cb, error);
1649 
1650 	return error;
1651 }
1652 
/*
 * Clear the direct_complete flag of the parent of @dev and of all of its
 * suppliers, so they are not skipped by the remaining transition phases.
 */
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
1674 
1675 /**
1676  * __device_suspend - Execute "suspend" callbacks for given device.
1677  * @dev: Device to handle.
1678  * @state: PM transition of the system being carried out.
1679  * @async: If true, the device is being suspended asynchronously.
1680  */
1681 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1682 {
1683 	pm_callback_t callback = NULL;
1684 	const char *info = NULL;
1685 	int error = 0;
1686 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1687 
1688 	TRACE_DEVICE(dev);
1689 	TRACE_SUSPEND(0);
1690 
1691 	dpm_wait_for_subordinate(dev, async);
1692 
1693 	if (async_error) {
1694 		dev->power.direct_complete = false;
1695 		goto Complete;
1696 	}
1697 
1698 	/*
1699 	 * If a device configured to wake up the system from sleep states
1700 	 * has been suspended at run time and there's a resume request pending
1701 	 * for it, this is equivalent to the device signaling wakeup, so the
1702 	 * system suspend operation should be aborted.
1703 	 */
1704 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1705 		pm_wakeup_event(dev, 0);
1706 
1707 	if (pm_wakeup_pending()) {
1708 		dev->power.direct_complete = false;
1709 		async_error = -EBUSY;
1710 		goto Complete;
1711 	}
1712 
1713 	if (dev->power.syscore)
1714 		goto Complete;
1715 
1716 	/* Avoid direct_complete to let wakeup_path propagate. */
1717 	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1718 		dev->power.direct_complete = false;
1719 
1720 	if (dev->power.direct_complete) {
1721 		if (pm_runtime_status_suspended(dev)) {
1722 			pm_runtime_disable(dev);
1723 			if (pm_runtime_status_suspended(dev)) {
1724 				pm_dev_dbg(dev, state, "direct-complete ");
1725 				goto Complete;
1726 			}
1727 
1728 			pm_runtime_enable(dev);
1729 		}
1730 		dev->power.direct_complete = false;
1731 	}
1732 
1733 	dev->power.may_skip_resume = false;
1734 	dev->power.must_resume = false;
1735 
1736 	dpm_watchdog_set(&wd, dev);
1737 	device_lock(dev);
1738 
1739 	if (dev->pm_domain) {
1740 		info = "power domain ";
1741 		callback = pm_op(&dev->pm_domain->ops, state);
1742 		goto Run;
1743 	}
1744 
1745 	if (dev->type && dev->type->pm) {
1746 		info = "type ";
1747 		callback = pm_op(dev->type->pm, state);
1748 		goto Run;
1749 	}
1750 
1751 	if (dev->class && dev->class->pm) {
1752 		info = "class ";
1753 		callback = pm_op(dev->class->pm, state);
1754 		goto Run;
1755 	}
1756 
1757 	if (dev->bus) {
1758 		if (dev->bus->pm) {
1759 			info = "bus ";
1760 			callback = pm_op(dev->bus->pm, state);
1761 		} else if (dev->bus->suspend) {
1762 			pm_dev_dbg(dev, state, "legacy bus ");
1763 			error = legacy_suspend(dev, state, dev->bus->suspend,
1764 						"legacy bus ");
1765 			goto End;
1766 		}
1767 	}
1768 
1769  Run:
1770 	if (!callback && dev->driver && dev->driver->pm) {
1771 		info = "driver ";
1772 		callback = pm_op(dev->driver->pm, state);
1773 	}
1774 
1775 	error = dpm_run_callback(callback, dev, state, info);
1776 
1777  End:
1778 	if (!error) {
1779 		dev->power.is_suspended = true;
1780 		if (device_may_wakeup(dev))
1781 			dev->power.wakeup_path = true;
1782 
1783 		dpm_propagate_wakeup_to_parent(dev);
1784 		dpm_clear_superiors_direct_complete(dev);
1785 	}
1786 
1787 	device_unlock(dev);
1788 	dpm_watchdog_clear(&wd);
1789 
1790  Complete:
1791 	if (error)
1792 		async_error = error;
1793 
1794 	complete_all(&dev->power.completion);
1795 	TRACE_SUSPEND(error);
1796 	return error;
1797 }
1798 
1799 static void async_suspend(void *data, async_cookie_t cookie)
1800 {
1801 	struct device *dev = (struct device *)data;
1802 	int error;
1803 
1804 	error = __device_suspend(dev, pm_transition, true);
1805 	if (error) {
1806 		dpm_save_failed_dev(dev_name(dev));
1807 		pm_dev_err(dev, pm_transition, " async", error);
1808 	}
1809 
1810 	put_device(dev);
1811 }
1812 
1813 static int device_suspend(struct device *dev)
1814 {
1815 	if (dpm_async_fn(dev, async_suspend))
1816 		return 0;
1817 
1818 	return __device_suspend(dev, pm_transition, false);
1819 }
1820 
1821 /**
1822  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1823  * @state: PM transition of the system being carried out.
1824  */
1825 int dpm_suspend(pm_message_t state)
1826 {
1827 	ktime_t starttime = ktime_get();
1828 	int error = 0;
1829 
1830 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1831 	might_sleep();
1832 
1833 	devfreq_suspend();
1834 	cpufreq_suspend();
1835 
1836 	mutex_lock(&dpm_list_mtx);
1837 	pm_transition = state;
1838 	async_error = 0;
1839 	while (!list_empty(&dpm_prepared_list)) {
1840 		struct device *dev = to_device(dpm_prepared_list.prev);
1841 
1842 		get_device(dev);
1843 		mutex_unlock(&dpm_list_mtx);
1844 
1845 		error = device_suspend(dev);
1846 
1847 		mutex_lock(&dpm_list_mtx);
1848 		if (error) {
1849 			pm_dev_err(dev, state, "", error);
1850 			dpm_save_failed_dev(dev_name(dev));
1851 			put_device(dev);
1852 			break;
1853 		}
1854 		if (!list_empty(&dev->power.entry))
1855 			list_move(&dev->power.entry, &dpm_suspended_list);
1856 		put_device(dev);
1857 		if (async_error)
1858 			break;
1859 	}
1860 	mutex_unlock(&dpm_list_mtx);
1861 	async_synchronize_full();
1862 	if (!error)
1863 		error = async_error;
1864 	if (error) {
1865 		suspend_stats.failed_suspend++;
1866 		dpm_save_failed_step(SUSPEND_SUSPEND);
1867 	}
1868 	dpm_show_time(starttime, state, error, NULL);
1869 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1870 	return error;
1871 }
1872 
1873 /**
1874  * device_prepare - Prepare a device for system power transition.
1875  * @dev: Device to handle.
1876  * @state: PM transition of the system being carried out.
1877  *
1878  * Execute the ->prepare() callback(s) for given device.  No new children of the
1879  * device may be registered after this function has returned.
1880  */
1881 static int device_prepare(struct device *dev, pm_message_t state)
1882 {
1883 	int (*callback)(struct device *) = NULL;
1884 	int ret = 0;
1885 
1886 	if (dev->power.syscore)
1887 		return 0;
1888 
1889 	WARN_ON(!pm_runtime_enabled(dev) &&
1890 		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1891 					      DPM_FLAG_LEAVE_SUSPENDED));
1892 
1893 	/*
1894 	 * If a device's parent goes into runtime suspend at the wrong time,
1895 	 * it won't be possible to resume the device.  To prevent this we
1896 	 * block runtime suspend here, during the prepare phase, and allow
1897 	 * it again during the complete phase.
1898 	 */
1899 	pm_runtime_get_noresume(dev);
1900 
1901 	device_lock(dev);
1902 
1903 	dev->power.wakeup_path = false;
1904 
1905 	if (dev->power.no_pm_callbacks)
1906 		goto unlock;
1907 
1908 	if (dev->pm_domain)
1909 		callback = dev->pm_domain->ops.prepare;
1910 	else if (dev->type && dev->type->pm)
1911 		callback = dev->type->pm->prepare;
1912 	else if (dev->class && dev->class->pm)
1913 		callback = dev->class->pm->prepare;
1914 	else if (dev->bus && dev->bus->pm)
1915 		callback = dev->bus->pm->prepare;
1916 
1917 	if (!callback && dev->driver && dev->driver->pm)
1918 		callback = dev->driver->pm->prepare;
1919 
1920 	if (callback)
1921 		ret = callback(dev);
1922 
1923 unlock:
1924 	device_unlock(dev);
1925 
1926 	if (ret < 0) {
1927 		suspend_report_result(callback, ret);
1928 		pm_runtime_put(dev);
1929 		return ret;
1930 	}
1931 	/*
1932 	 * A positive return value from ->prepare() means "this device appears
1933 	 * to be runtime-suspended and its state is fine, so if it really is
1934 	 * runtime-suspended, you can leave it in that state provided that you
1935 	 * will do the same thing with all of its descendants".  This only
1936 	 * applies to suspend transitions, however.
1937 	 */
1938 	spin_lock_irq(&dev->power.lock);
1939 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1940 		((pm_runtime_suspended(dev) && ret > 0) ||
1941 		 dev->power.no_pm_callbacks) &&
1942 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1943 	spin_unlock_irq(&dev->power.lock);
1944 	return 0;
1945 }
1946 
1947 /**
1948  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1949  * @state: PM transition of the system being carried out.
1950  *
1951  * Execute the ->prepare() callback(s) for all devices.
1952  */
1953 int dpm_prepare(pm_message_t state)
1954 {
1955 	int error = 0;
1956 
1957 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1958 	might_sleep();
1959 
1960 	/*
1961 	 * Give a chance for the known devices to complete their probes, before
1962 	 * disable probing of devices. This sync point is important at least
1963 	 * at boot time + hibernation restore.
1964 	 */
1965 	wait_for_device_probe();
1966 	/*
1967 	 * It is unsafe if probing of devices will happen during suspend or
1968 	 * hibernation and system behavior will be unpredictable in this case.
1969 	 * So, let's prohibit device's probing here and defer their probes
1970 	 * instead. The normal behavior will be restored in dpm_complete().
1971 	 */
1972 	device_block_probing();
1973 
1974 	mutex_lock(&dpm_list_mtx);
1975 	while (!list_empty(&dpm_list)) {
1976 		struct device *dev = to_device(dpm_list.next);
1977 
1978 		get_device(dev);
1979 		mutex_unlock(&dpm_list_mtx);
1980 
1981 		trace_device_pm_callback_start(dev, "", state.event);
1982 		error = device_prepare(dev, state);
1983 		trace_device_pm_callback_end(dev, error);
1984 
1985 		mutex_lock(&dpm_list_mtx);
1986 		if (error) {
1987 			if (error == -EAGAIN) {
1988 				put_device(dev);
1989 				error = 0;
1990 				continue;
1991 			}
1992 			pr_info("Device %s not prepared for power transition: code %d\n",
1993 				dev_name(dev), error);
1994 			put_device(dev);
1995 			break;
1996 		}
1997 		dev->power.is_prepared = true;
1998 		if (!list_empty(&dev->power.entry))
1999 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
2000 		put_device(dev);
2001 	}
2002 	mutex_unlock(&dpm_list_mtx);
2003 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2004 	return error;
2005 }
2006 
2007 /**
2008  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2009  * @state: PM transition of the system being carried out.
2010  *
2011  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2012  * callbacks for them.
2013  */
2014 int dpm_suspend_start(pm_message_t state)
2015 {
2016 	ktime_t starttime = ktime_get();
2017 	int error;
2018 
2019 	error = dpm_prepare(state);
2020 	if (error) {
2021 		suspend_stats.failed_prepare++;
2022 		dpm_save_failed_step(SUSPEND_PREPARE);
2023 	} else
2024 		error = dpm_suspend(state);
2025 	dpm_show_time(starttime, state, error, "start");
2026 	return error;
2027 }
2028 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2029 
/* Log a failing PM callback (%pS resolves @fn to a symbol name). */
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (!ret)
		return;

	pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
2036 
2037 /**
2038  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2039  * @subordinate: Device that needs to wait for @dev.
2040  * @dev: Device to wait for.
2041  */
2042 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2043 {
2044 	dpm_wait(dev, subordinate->power.async_suspend);
2045 	return async_error;
2046 }
2047 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2048 
2049 /**
2050  * dpm_for_each_dev - device iterator.
2051  * @data: data for the callback.
2052  * @fn: function to be called for each device.
2053  *
2054  * Iterate over devices in dpm_list, and call @fn for each device,
2055  * passing it @data.
2056  */
2057 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2058 {
2059 	struct device *dev;
2060 
2061 	if (!fn)
2062 		return;
2063 
2064 	device_pm_lock();
2065 	list_for_each_entry(dev, &dpm_list, power.entry)
2066 		fn(dev, data);
2067 	device_pm_unlock();
2068 }
2069 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2070 
2071 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2072 {
2073 	if (!ops)
2074 		return true;
2075 
2076 	return !ops->prepare &&
2077 	       !ops->suspend &&
2078 	       !ops->suspend_late &&
2079 	       !ops->suspend_noirq &&
2080 	       !ops->resume_noirq &&
2081 	       !ops->resume_early &&
2082 	       !ops->resume &&
2083 	       !ops->complete;
2084 }
2085 
/*
 * Set the no_pm_callbacks flag of @dev if neither its subsystems (bus,
 * class, type, PM domain) nor its driver provide any PM callbacks.
 */
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}
2099 
/*
 * Return true if @dev has DPM_FLAG_SMART_SUSPEND set and its runtime PM
 * status is "suspended".
 */
bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
2105