/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = 0;

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		/* ">> 10" cheaply approximates dividing by NSEC_PER_USEC. */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}
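
/*
 * Illustrative usage note: the two helpers above print only while
 * pm_print_times_enabled is set.  On kernels with PM sleep debugging
 * support, user space can toggle it through sysfs, e.g.:
 *
 *	echo 1 > /sys/power/pm_print_times
 */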

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
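
/*
 * Taken together, the wait helpers above encode the PM ordering rules:
 * a resuming device waits for its "superiors" (parent and suppliers)
 * to finish first, while a suspending device waits for its
 * "subordinates" (children and consumers).  This keeps dependency
 * ordering intact even when devices are handled by concurrent async
 * threads.
 */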

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
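
/*
 * Example (illustrative; "foo" is a hypothetical driver): pm_op()
 * selects one of the callbacks a driver publishes in its dev_pm_ops,
 * typically populated as:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With that, pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend.
 */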

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback runs.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned
 * callback runs.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of
	 * async threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of
	 * async threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
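
/*
 * Big picture (illustrative sketch; the exact call sites live in
 * kernel/power/): the system sleep core drives the phases above
 * roughly as
 *
 *	dpm_suspend_start(PMSG_SUSPEND);
 *	dpm_suspend_end(PMSG_SUSPEND);
 *	...the platform enters the sleep state...
 *	dpm_resume_start(PMSG_RESUME);
 *	dpm_resume_end(PMSG_RESUME);
 *
 * with errors at any step unwinding through the matching resume calls.
 */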


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
		dpm_clear_suppliers_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
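
/*
 * Example (illustrative; "foo" is a hypothetical driver): a driver can
 * request the direct_complete optimization described above by
 * returning a positive value from its ->prepare() callback when the
 * device may stay runtime-suspended across the transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 */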

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before
	 * we disable further probing.  This sync point is important at
	 * least during boot and when restoring from hibernation.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and
	 * would make system behavior unpredictable, so prohibit device
	 * probing here and defer the probes instead.  The normal behavior
	 * will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
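
/*
 * Example (illustrative; "other_dev" and "foo" are hypothetical): a
 * driver whose device depends on a device outside its ancestor chain
 * can serialize against it from its own callbacks:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, other_dev);
 *
 *		if (error)
 *			return error;
 *		return 0;
 *	}
 */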

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
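
/*
 * Example (illustrative) of walking the PM device list:
 *
 *	static void count_device(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	dpm_for_each_dev(&count, count_device);
 */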

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}
1843