/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
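
/*
 * (The rule above prevents an AB-BA deadlock: device_pm_add() may run
 * with a device lock held and then take dpm_list_mtx, so any path that
 * holds dpm_list_mtx and then blocks on a device lock could deadlock
 * against it.)
 */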

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}
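
/*
 * Note: pm_print_times_enabled is a debugging aid, normally toggled from
 * user space via /sys/power/pm_print_times; the "calling"/"call ...
 * returned" messages only appear while it is set.
 */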

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

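	/* The shift below approximates ns -> us (divides by 1024, not 1000). */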
	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
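
/*
 * For reference, a subsystem or driver typically fills the dev_pm_ops
 * table that pm_op() walks with the SET_SYSTEM_SLEEP_PM_OPS() helper from
 * <linux/pm.h>.  A minimal sketch (foo_suspend/foo_resume are
 * hypothetical):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * The macro points .suspend/.freeze/.poweroff at foo_suspend and
 * .resume/.thaw/.restore at foo_resume, so each event above resolves to a
 * callback.
 */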

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while any callback returned by this
 * function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Device drivers will not receive interrupts while any callback returned by
 * this function is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
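
/*
 * Note that asynchronous suspend/resume is opt-in per device: subsystems
 * and drivers request it with device_enable_async_suspend(), and user
 * space can disable it globally via /sys/power/pm_async, which drives
 * pm_async_enabled above.
 */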

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
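
/*
 * A rough sketch of how the suspend core (see kernel/power/suspend.c)
 * pairs the entry points defined in this file over a suspend/resume
 * cycle; error handling is trimmed and the flow is illustrative only:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);
 *		if (!error) {
 *			... enter the sleep state ...
 *			dpm_resume_start(PMSG_RESUME);
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 */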


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller, used in debug messages.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
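			/*
			 * -EAGAIN from a ->prepare() callback is not fatal:
			 * the device is skipped and the transition carries
			 * on with the remaining devices.
			 */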
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
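
/*
 * Illustrative use (all foo_* names are hypothetical): an async suspend
 * callback can order itself after a dependency that is not its parent:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->supplier);
 *		if (error)
 *			return error;
 *		...
 *	}
 */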

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
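
/*
 * Illustrative use (show_dev is hypothetical): dump the current suspend
 * ordering of all devices known to the PM core:
 *
 *	static void show_dev(struct device *dev, void *data)
 *	{
 *		pr_info("dpm_list: %s\n", dev_name(dev));
 *	}
 *
 *	dpm_for_each_dev(NULL, show_dev);
 */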