xref: /openbmc/linux/drivers/base/power/main.c (revision 44ad3baf1cca483e418b6aadf2d3994f69e0f16a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 	list_for_each_entry_rcu(pos, head, member, \
45 			device_links_read_lock_held())
46 
47 /*
48  * The entries in the dpm_list list are in a depth first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56 
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62 
63 struct suspend_stats suspend_stats;
64 static DEFINE_MUTEX(dpm_list_mtx);
65 static pm_message_t pm_transition;
66 
67 static int async_error;
68 
69 static const char *pm_verb(int event)
70 {
71 	switch (event) {
72 	case PM_EVENT_SUSPEND:
73 		return "suspend";
74 	case PM_EVENT_RESUME:
75 		return "resume";
76 	case PM_EVENT_FREEZE:
77 		return "freeze";
78 	case PM_EVENT_QUIESCE:
79 		return "quiesce";
80 	case PM_EVENT_HIBERNATE:
81 		return "hibernate";
82 	case PM_EVENT_THAW:
83 		return "thaw";
84 	case PM_EVENT_RESTORE:
85 		return "restore";
86 	case PM_EVENT_RECOVER:
87 		return "recover";
88 	default:
89 		return "(unknown PM event)";
90 	}
91 }
92 
93 /**
94  * device_pm_sleep_init - Initialize system suspend-related device fields.
95  * @dev: Device object being initialized.
96  */
97 void device_pm_sleep_init(struct device *dev)
98 {
99 	dev->power.is_prepared = false;
100 	dev->power.is_suspended = false;
101 	dev->power.is_noirq_suspended = false;
102 	dev->power.is_late_suspended = false;
103 	init_completion(&dev->power.completion);
104 	complete_all(&dev->power.completion);
105 	dev->power.wakeup = NULL;
106 	INIT_LIST_HEAD(&dev->power.entry);
107 }
108 
109 /**
110  * device_pm_lock - Lock the list of active devices used by the PM core.
111  */
112 void device_pm_lock(void)
113 {
114 	mutex_lock(&dpm_list_mtx);
115 }
116 
117 /**
118  * device_pm_unlock - Unlock the list of active devices used by the PM core.
119  */
120 void device_pm_unlock(void)
121 {
122 	mutex_unlock(&dpm_list_mtx);
123 }
124 
125 /**
126  * device_pm_add - Add a device to the PM core's list of active devices.
127  * @dev: Device to add to the list.
128  */
129 void device_pm_add(struct device *dev)
130 {
131 	/* Skip PM setup/initialization. */
132 	if (device_pm_not_required(dev))
133 		return;
134 
135 	pr_debug("Adding info for %s:%s\n",
136 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137 	device_pm_check_callbacks(dev);
138 	mutex_lock(&dpm_list_mtx);
139 	if (dev->parent && dev->parent->power.is_prepared)
140 		dev_warn(dev, "parent %s should not be sleeping\n",
141 			dev_name(dev->parent));
142 	list_add_tail(&dev->power.entry, &dpm_list);
143 	dev->power.in_dpm_list = true;
144 	mutex_unlock(&dpm_list_mtx);
145 }
146 
147 /**
148  * device_pm_remove - Remove a device from the PM core's list of active devices.
149  * @dev: Device to be removed from the list.
150  */
151 void device_pm_remove(struct device *dev)
152 {
153 	if (device_pm_not_required(dev))
154 		return;
155 
156 	pr_debug("Removing info for %s:%s\n",
157 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158 	complete_all(&dev->power.completion);
159 	mutex_lock(&dpm_list_mtx);
160 	list_del_init(&dev->power.entry);
161 	dev->power.in_dpm_list = false;
162 	mutex_unlock(&dpm_list_mtx);
163 	device_wakeup_disable(dev);
164 	pm_runtime_remove(dev);
165 	device_pm_check_callbacks(dev);
166 }
167 
168 /**
169  * device_pm_move_before - Move device in the PM core's list of active devices.
170  * @deva: Device to move in dpm_list.
171  * @devb: Device @deva should come before.
172  */
173 void device_pm_move_before(struct device *deva, struct device *devb)
174 {
175 	pr_debug("Moving %s:%s before %s:%s\n",
176 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178 	/* Delete deva from dpm_list and reinsert before devb. */
179 	list_move_tail(&deva->power.entry, &devb->power.entry);
180 }
181 
182 /**
183  * device_pm_move_after - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come after.
186  */
187 void device_pm_move_after(struct device *deva, struct device *devb)
188 {
189 	pr_debug("Moving %s:%s after %s:%s\n",
190 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 	/* Delete deva from dpm_list and reinsert after devb. */
193 	list_move(&deva->power.entry, &devb->power.entry);
194 }
195 
196 /**
197  * device_pm_move_last - Move device to end of the PM core's list of devices.
198  * @dev: Device to move in dpm_list.
199  */
200 void device_pm_move_last(struct device *dev)
201 {
202 	pr_debug("Moving %s:%s to end of list\n",
203 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204 	list_move_tail(&dev->power.entry, &dpm_list);
205 }
206 
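/*
 * initcall_debug_start/initcall_debug_report - Optional PM callback timing.
 *
 * When pm_print_times_enabled is set, log which callback is about to be run
 * for a device before invoking it and, afterwards, what it returned and how
 * long it took (in microseconds).
 */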
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
208 {
209 	if (!pm_print_times_enabled)
210 		return 0;
211 
212 	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
213 		 task_pid_nr(current),
214 		 dev->parent ? dev_name(dev->parent) : "none");
215 	return ktime_get();
216 }
217 
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
219 				  void *cb, int error)
220 {
221 	ktime_t rettime;
222 
223 	if (!pm_print_times_enabled)
224 		return;
225 
226 	rettime = ktime_get();
227 	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
228 		 (unsigned long long)ktime_us_delta(rettime, calltime));
229 }
230 
231 /**
232  * dpm_wait - Wait for a PM operation to complete.
233  * @dev: Device to wait for.
234  * @async: If unset, wait only if the device's power.async_suspend flag is set.
235  */
236 static void dpm_wait(struct device *dev, bool async)
237 {
238 	if (!dev)
239 		return;
240 
241 	if (async || (pm_async_enabled && dev->power.async_suspend))
242 		wait_for_completion(&dev->power.completion);
243 }
244 
245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
246 {
247 	dpm_wait(dev, *((bool *)async_ptr));
248 	return 0;
249 }
250 
251 static void dpm_wait_for_children(struct device *dev, bool async)
252 {
253 	device_for_each_child(dev, &async, dpm_wait_fn);
254 }
255 
256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
257 {
258 	struct device_link *link;
259 	int idx;
260 
261 	idx = device_links_read_lock();
262 
263 	/*
264 	 * If the supplier goes away right after we've checked the link to it,
265 	 * we'll wait for its completion to change the state, but that's fine,
266 	 * because the only things that will block as a result are the SRCU
267 	 * callbacks freeing the link objects for the links in the list we're
268 	 * walking.
269 	 */
270 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
271 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
272 			dpm_wait(link->supplier, async);
273 
274 	device_links_read_unlock(idx);
275 }
276 
277 static bool dpm_wait_for_superior(struct device *dev, bool async)
278 {
279 	struct device *parent;
280 
281 	/*
282 	 * If the device is resumed asynchronously and the parent's callback
283 	 * deletes both the device and the parent itself, the parent object may
284 	 * be freed while this function is running, so avoid that by reference
285 	 * counting the parent once more unless the device has been deleted
286 	 * already (in which case return right away).
287 	 */
288 	mutex_lock(&dpm_list_mtx);
289 
290 	if (!device_pm_initialized(dev)) {
291 		mutex_unlock(&dpm_list_mtx);
292 		return false;
293 	}
294 
295 	parent = get_device(dev->parent);
296 
297 	mutex_unlock(&dpm_list_mtx);
298 
299 	dpm_wait(parent, async);
300 	put_device(parent);
301 
302 	dpm_wait_for_suppliers(dev, async);
303 
304 	/*
305 	 * If the parent's callback has deleted the device, attempting to resume
306 	 * it would be invalid, so avoid doing that then.
307 	 */
308 	return device_pm_initialized(dev);
309 }
310 
311 static void dpm_wait_for_consumers(struct device *dev, bool async)
312 {
313 	struct device_link *link;
314 	int idx;
315 
316 	idx = device_links_read_lock();
317 
318 	/*
319 	 * The status of a device link can only be changed from "dormant" by a
320 	 * probe, but that cannot happen during system suspend/resume.  In
321 	 * theory it can change to "dormant" at that time, but then it is
322 	 * reasonable to wait for the target device anyway (e.g. if it goes
323 	 * away, it's better to wait for it to go away completely and then
324 	 * continue instead of trying to continue in parallel with its
325 	 * unregistration).
326 	 */
327 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
328 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
329 			dpm_wait(link->consumer, async);
330 
331 	device_links_read_unlock(idx);
332 }
333 
334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
335 {
336 	dpm_wait_for_children(dev, async);
337 	dpm_wait_for_consumers(dev, async);
338 }
339 
340 /**
341  * pm_op - Return the PM operation appropriate for given PM event.
342  * @ops: PM operations to choose from.
343  * @state: PM transition of the system being carried out.
344  */
345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
346 {
347 	switch (state.event) {
348 #ifdef CONFIG_SUSPEND
349 	case PM_EVENT_SUSPEND:
350 		return ops->suspend;
351 	case PM_EVENT_RESUME:
352 		return ops->resume;
353 #endif /* CONFIG_SUSPEND */
354 #ifdef CONFIG_HIBERNATE_CALLBACKS
355 	case PM_EVENT_FREEZE:
356 	case PM_EVENT_QUIESCE:
357 		return ops->freeze;
358 	case PM_EVENT_HIBERNATE:
359 		return ops->poweroff;
360 	case PM_EVENT_THAW:
361 	case PM_EVENT_RECOVER:
362 		return ops->thaw;
363 	case PM_EVENT_RESTORE:
364 		return ops->restore;
365 #endif /* CONFIG_HIBERNATE_CALLBACKS */
366 	}
367 
368 	return NULL;
369 }
370 
371 /**
372  * pm_late_early_op - Return the PM operation appropriate for given PM event.
373  * @ops: PM operations to choose from.
374  * @state: PM transition of the system being carried out.
375  *
376  * Runtime PM is disabled for the target device while the returned callback is executed.
377  */
378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
379 				      pm_message_t state)
380 {
381 	switch (state.event) {
382 #ifdef CONFIG_SUSPEND
383 	case PM_EVENT_SUSPEND:
384 		return ops->suspend_late;
385 	case PM_EVENT_RESUME:
386 		return ops->resume_early;
387 #endif /* CONFIG_SUSPEND */
388 #ifdef CONFIG_HIBERNATE_CALLBACKS
389 	case PM_EVENT_FREEZE:
390 	case PM_EVENT_QUIESCE:
391 		return ops->freeze_late;
392 	case PM_EVENT_HIBERNATE:
393 		return ops->poweroff_late;
394 	case PM_EVENT_THAW:
395 	case PM_EVENT_RECOVER:
396 		return ops->thaw_early;
397 	case PM_EVENT_RESTORE:
398 		return ops->restore_early;
399 #endif /* CONFIG_HIBERNATE_CALLBACKS */
400 	}
401 
402 	return NULL;
403 }
404 
405 /**
406  * pm_noirq_op - Return the PM operation appropriate for given PM event.
407  * @ops: PM operations to choose from.
408  * @state: PM transition of the system being carried out.
409  *
410  * The driver of the target device will not receive interrupts while the
411  * returned callback is executed.
412  */
413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
414 {
415 	switch (state.event) {
416 #ifdef CONFIG_SUSPEND
417 	case PM_EVENT_SUSPEND:
418 		return ops->suspend_noirq;
419 	case PM_EVENT_RESUME:
420 		return ops->resume_noirq;
421 #endif /* CONFIG_SUSPEND */
422 #ifdef CONFIG_HIBERNATE_CALLBACKS
423 	case PM_EVENT_FREEZE:
424 	case PM_EVENT_QUIESCE:
425 		return ops->freeze_noirq;
426 	case PM_EVENT_HIBERNATE:
427 		return ops->poweroff_noirq;
428 	case PM_EVENT_THAW:
429 	case PM_EVENT_RECOVER:
430 		return ops->thaw_noirq;
431 	case PM_EVENT_RESTORE:
432 		return ops->restore_noirq;
433 #endif /* CONFIG_HIBERNATE_CALLBACKS */
434 	}
435 
436 	return NULL;
437 }
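
/*
 * Illustration only (not part of this file): the callbacks that pm_op(),
 * pm_late_early_op() and pm_noirq_op() above pick out are the ones a driver
 * (or bus, class, type, or power domain) publishes through struct dev_pm_ops.
 * A hypothetical driver "foo" could provide, for example:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 *	};
 *
 * The foo_* functions are made-up names; each has the int (*)(struct device *)
 * signature matching pm_callback_t above.
 */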
438 
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
440 {
441 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
442 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
443 		", may wakeup" : "", dev->power.driver_flags);
444 }
445 
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
447 			int error)
448 {
449 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
450 		error);
451 }
452 
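/*
 * dpm_show_time - Report how long a device PM phase took.
 *
 * Print, via pm_pr_dbg(), the time elapsed since @starttime for the phase
 * described by @info and @state (for example "noirq resume of devices
 * complete after 1.234 msecs"), noting whether the phase was aborted.
 */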
453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
454 			  const char *info)
455 {
456 	ktime_t calltime;
457 	u64 usecs64;
458 	int usecs;
459 
460 	calltime = ktime_get();
461 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
462 	do_div(usecs64, NSEC_PER_USEC);
463 	usecs = usecs64;
464 	if (usecs == 0)
465 		usecs = 1;
466 
467 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
468 		  info ?: "", info ? " " : "", pm_verb(state.event),
469 		  error ? "aborted" : "complete",
470 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
471 }
472 
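/*
 * dpm_run_callback - Invoke a single device PM callback.
 *
 * Run @cb for @dev (if @cb is not NULL) wrapped in the device_pm_callback
 * trace events, optional initcall-style timing and suspend_report_result(),
 * and return whatever the callback returned.
 */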
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
474 			    pm_message_t state, const char *info)
475 {
476 	ktime_t calltime;
477 	int error;
478 
479 	if (!cb)
480 		return 0;
481 
482 	calltime = initcall_debug_start(dev, cb);
483 
484 	pm_dev_dbg(dev, state, info);
485 	trace_device_pm_callback_start(dev, info, state.event);
486 	error = cb(dev);
487 	trace_device_pm_callback_end(dev, error);
488 	suspend_report_result(dev, cb, error);
489 
490 	initcall_debug_report(dev, calltime, cb, error);
491 
492 	return error;
493 }
494 
495 #ifdef CONFIG_DPM_WATCHDOG
496 struct dpm_watchdog {
497 	struct device		*dev;
498 	struct task_struct	*tsk;
499 	struct timer_list	timer;
500 };
501 
502 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
503 	struct dpm_watchdog wd
504 
505 /**
506  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
507  * @t: The timer that PM watchdog depends on.
508  *
509  * Called when a driver has timed out suspending or resuming.
510  * There's not much we can do here to recover so panic() to
511  * capture a crash-dump in pstore.
512  */
513 static void dpm_watchdog_handler(struct timer_list *t)
514 {
515 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
516 
517 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
518 	show_stack(wd->tsk, NULL, KERN_EMERG);
519 	panic("%s %s: unrecoverable failure\n",
520 		dev_driver_string(wd->dev), dev_name(wd->dev));
521 }
522 
523 /**
524  * dpm_watchdog_set - Enable pm watchdog for given device.
525  * @wd: Watchdog. Must be allocated on the stack.
526  * @dev: Device to handle.
527  */
528 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
529 {
530 	struct timer_list *timer = &wd->timer;
531 
532 	wd->dev = dev;
533 	wd->tsk = current;
534 
535 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
536 	/* use same timeout value for both suspend and resume */
537 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
538 	add_timer(timer);
539 }
540 
541 /**
542  * dpm_watchdog_clear - Disable suspend/resume watchdog.
543  * @wd: Watchdog to disable.
544  */
545 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
546 {
547 	struct timer_list *timer = &wd->timer;
548 
549 	del_timer_sync(timer);
550 	destroy_timer_on_stack(timer);
551 }
552 #else
553 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
554 #define dpm_watchdog_set(x, y)
555 #define dpm_watchdog_clear(x)
556 #endif
557 
558 /*------------------------- Resume routines -------------------------*/
559 
560 /**
561  * dev_pm_skip_resume - System-wide device resume optimization check.
562  * @dev: Target device.
563  *
564  * Return:
565  * - %false if the transition under way is RESTORE.
566  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
567  * - The logical negation of %power.must_resume otherwise (that is, when the
568  *   transition under way is RESUME).
569  */
570 bool dev_pm_skip_resume(struct device *dev)
571 {
572 	if (pm_transition.event == PM_EVENT_RESTORE)
573 		return false;
574 
575 	if (pm_transition.event == PM_EVENT_THAW)
576 		return dev_pm_skip_suspend(dev);
577 
578 	return !dev->power.must_resume;
579 }
580 
581 /**
582  * __device_resume_noirq - Execute a "noirq resume" callback for given device.
583  * @dev: Device to handle.
584  * @state: PM transition of the system being carried out.
585  * @async: If true, the device is being resumed asynchronously.
586  *
587  * The driver of @dev will not receive interrupts while this function is being
588  * executed.
589  */
590 static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
591 {
592 	pm_callback_t callback = NULL;
593 	const char *info = NULL;
594 	bool skip_resume;
595 	int error = 0;
596 
597 	TRACE_DEVICE(dev);
598 	TRACE_RESUME(0);
599 
600 	if (dev->power.syscore || dev->power.direct_complete)
601 		goto Out;
602 
603 	if (!dev->power.is_noirq_suspended)
604 		goto Out;
605 
606 	if (!dpm_wait_for_superior(dev, async))
607 		goto Out;
608 
609 	skip_resume = dev_pm_skip_resume(dev);
610 	/*
611 	 * If the driver callback is skipped below or by the middle layer
612 	 * callback and device_resume_early() also skips the driver callback for
613 	 * this device later, it needs to appear as "suspended" to PM-runtime,
614 	 * so change its status accordingly.
615 	 *
616 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
617 	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
618 	 * to avoid confusing drivers that don't use it.
619 	 */
620 	if (skip_resume)
621 		pm_runtime_set_suspended(dev);
622 	else if (dev_pm_skip_suspend(dev))
623 		pm_runtime_set_active(dev);
624 
625 	if (dev->pm_domain) {
626 		info = "noirq power domain ";
627 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
628 	} else if (dev->type && dev->type->pm) {
629 		info = "noirq type ";
630 		callback = pm_noirq_op(dev->type->pm, state);
631 	} else if (dev->class && dev->class->pm) {
632 		info = "noirq class ";
633 		callback = pm_noirq_op(dev->class->pm, state);
634 	} else if (dev->bus && dev->bus->pm) {
635 		info = "noirq bus ";
636 		callback = pm_noirq_op(dev->bus->pm, state);
637 	}
638 	if (callback)
639 		goto Run;
640 
641 	if (skip_resume)
642 		goto Skip;
643 
644 	if (dev->driver && dev->driver->pm) {
645 		info = "noirq driver ";
646 		callback = pm_noirq_op(dev->driver->pm, state);
647 	}
648 
649 Run:
650 	error = dpm_run_callback(callback, dev, state, info);
651 
652 Skip:
653 	dev->power.is_noirq_suspended = false;
654 
655 Out:
656 	complete_all(&dev->power.completion);
657 	TRACE_RESUME(error);
658 
659 	if (error) {
660 		suspend_stats.failed_resume_noirq++;
661 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
662 		dpm_save_failed_dev(dev_name(dev));
663 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
664 	}
665 }
666 
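/*
 * is_async - Should @dev be suspended and resumed asynchronously?
 *
 * Asynchronous handling is used only if the device asked for it via
 * power.async_suspend, pm_async_enabled is set and PM tracing is not active.
 * dpm_async_fn() below schedules @func accordingly and returns false when the
 * caller should run the callback synchronously instead (async is not used for
 * the device or scheduling it failed).
 */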
667 static bool is_async(struct device *dev)
668 {
669 	return dev->power.async_suspend && pm_async_enabled
670 		&& !pm_trace_is_enabled();
671 }
672 
673 static bool dpm_async_fn(struct device *dev, async_func_t func)
674 {
675 	reinit_completion(&dev->power.completion);
676 
677 	if (!is_async(dev))
678 		return false;
679 
680 	get_device(dev);
681 
682 	if (async_schedule_dev_nocall(func, dev))
683 		return true;
684 
685 	put_device(dev);
686 
687 	return false;
688 }
689 
690 static void async_resume_noirq(void *data, async_cookie_t cookie)
691 {
692 	struct device *dev = data;
693 
694 	__device_resume_noirq(dev, pm_transition, true);
695 	put_device(dev);
696 }
697 
698 static void device_resume_noirq(struct device *dev)
699 {
700 	if (dpm_async_fn(dev, async_resume_noirq))
701 		return;
702 
703 	__device_resume_noirq(dev, pm_transition, false);
704 }
705 
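/*
 * dpm_noirq_resume_devices - Run the "noirq" phase of resuming devices.
 *
 * Move each device from dpm_noirq_list to dpm_late_early_list and invoke its
 * "noirq" resume callback, possibly asynchronously, then wait for all of the
 * asynchronous callbacks to complete.
 */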
706 static void dpm_noirq_resume_devices(pm_message_t state)
707 {
708 	struct device *dev;
709 	ktime_t starttime = ktime_get();
710 
711 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
712 	mutex_lock(&dpm_list_mtx);
713 	pm_transition = state;
714 
715 	while (!list_empty(&dpm_noirq_list)) {
716 		dev = to_device(dpm_noirq_list.next);
717 		get_device(dev);
718 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
719 
720 		mutex_unlock(&dpm_list_mtx);
721 
722 		device_resume_noirq(dev);
723 
724 		put_device(dev);
725 
726 		mutex_lock(&dpm_list_mtx);
727 	}
728 	mutex_unlock(&dpm_list_mtx);
729 	async_synchronize_full();
730 	dpm_show_time(starttime, state, 0, "noirq");
731 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
732 }
733 
734 /**
735  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
736  * @state: PM transition of the system being carried out.
737  *
738  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
739  * allow device drivers' interrupt handlers to be called.
740  */
741 void dpm_resume_noirq(pm_message_t state)
742 {
743 	dpm_noirq_resume_devices(state);
744 
745 	resume_device_irqs();
746 	device_wakeup_disarm_wake_irqs();
747 }
748 
749 /**
750  * __device_resume_early - Execute an "early resume" callback for given device.
751  * @dev: Device to handle.
752  * @state: PM transition of the system being carried out.
753  * @async: If true, the device is being resumed asynchronously.
754  *
755  * Runtime PM is disabled for @dev while this function is being executed.
756  */
757 static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
758 {
759 	pm_callback_t callback = NULL;
760 	const char *info = NULL;
761 	int error = 0;
762 
763 	TRACE_DEVICE(dev);
764 	TRACE_RESUME(0);
765 
766 	if (dev->power.syscore || dev->power.direct_complete)
767 		goto Out;
768 
769 	if (!dev->power.is_late_suspended)
770 		goto Out;
771 
772 	if (!dpm_wait_for_superior(dev, async))
773 		goto Out;
774 
775 	if (dev->pm_domain) {
776 		info = "early power domain ";
777 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
778 	} else if (dev->type && dev->type->pm) {
779 		info = "early type ";
780 		callback = pm_late_early_op(dev->type->pm, state);
781 	} else if (dev->class && dev->class->pm) {
782 		info = "early class ";
783 		callback = pm_late_early_op(dev->class->pm, state);
784 	} else if (dev->bus && dev->bus->pm) {
785 		info = "early bus ";
786 		callback = pm_late_early_op(dev->bus->pm, state);
787 	}
788 	if (callback)
789 		goto Run;
790 
791 	if (dev_pm_skip_resume(dev))
792 		goto Skip;
793 
794 	if (dev->driver && dev->driver->pm) {
795 		info = "early driver ";
796 		callback = pm_late_early_op(dev->driver->pm, state);
797 	}
798 
799 Run:
800 	error = dpm_run_callback(callback, dev, state, info);
801 
802 Skip:
803 	dev->power.is_late_suspended = false;
804 
805 Out:
806 	TRACE_RESUME(error);
807 
808 	pm_runtime_enable(dev);
809 	complete_all(&dev->power.completion);
810 
811 	if (error) {
812 		suspend_stats.failed_resume_early++;
813 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
814 		dpm_save_failed_dev(dev_name(dev));
815 		pm_dev_err(dev, state, async ? " async early" : " early", error);
816 	}
817 }
818 
819 static void async_resume_early(void *data, async_cookie_t cookie)
820 {
821 	struct device *dev = data;
822 
823 	__device_resume_early(dev, pm_transition, true);
824 	put_device(dev);
825 }
826 
827 static void device_resume_early(struct device *dev)
828 {
829 	if (dpm_async_fn(dev, async_resume_early))
830 		return;
831 
832 	__device_resume_early(dev, pm_transition, false);
833 }
834 
835 /**
836  * dpm_resume_early - Execute "early resume" callbacks for all devices.
837  * @state: PM transition of the system being carried out.
838  */
839 void dpm_resume_early(pm_message_t state)
840 {
841 	struct device *dev;
842 	ktime_t starttime = ktime_get();
843 
844 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
845 	mutex_lock(&dpm_list_mtx);
846 	pm_transition = state;
847 
848 	while (!list_empty(&dpm_late_early_list)) {
849 		dev = to_device(dpm_late_early_list.next);
850 		get_device(dev);
851 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
852 
853 		mutex_unlock(&dpm_list_mtx);
854 
855 		device_resume_early(dev);
856 
857 		put_device(dev);
858 
859 		mutex_lock(&dpm_list_mtx);
860 	}
861 	mutex_unlock(&dpm_list_mtx);
862 	async_synchronize_full();
863 	dpm_show_time(starttime, state, 0, "early");
864 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
865 }
866 
867 /**
868  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
869  * @state: PM transition of the system being carried out.
870  */
871 void dpm_resume_start(pm_message_t state)
872 {
873 	dpm_resume_noirq(state);
874 	dpm_resume_early(state);
875 }
876 EXPORT_SYMBOL_GPL(dpm_resume_start);
877 
878 /**
879  * __device_resume - Execute "resume" callbacks for given device.
880  * @dev: Device to handle.
881  * @state: PM transition of the system being carried out.
882  * @async: If true, the device is being resumed asynchronously.
883  */
884 static void __device_resume(struct device *dev, pm_message_t state, bool async)
885 {
886 	pm_callback_t callback = NULL;
887 	const char *info = NULL;
888 	int error = 0;
889 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
890 
891 	TRACE_DEVICE(dev);
892 	TRACE_RESUME(0);
893 
894 	if (dev->power.syscore)
895 		goto Complete;
896 
897 	if (!dev->power.is_suspended)
898 		goto Complete;
899 
900 	dev->power.is_suspended = false;
901 
902 	if (dev->power.direct_complete) {
903 		/* Match the pm_runtime_disable() in __device_suspend(). */
904 		pm_runtime_enable(dev);
905 		goto Complete;
906 	}
907 
908 	if (!dpm_wait_for_superior(dev, async))
909 		goto Complete;
910 
911 	dpm_watchdog_set(&wd, dev);
912 	device_lock(dev);
913 
914 	/*
915 	 * This is a fib.  But we'll allow new children to be added below
916 	 * a resumed device, even if the device hasn't been completed yet.
917 	 */
918 	dev->power.is_prepared = false;
919 
920 	if (dev->pm_domain) {
921 		info = "power domain ";
922 		callback = pm_op(&dev->pm_domain->ops, state);
923 		goto Driver;
924 	}
925 
926 	if (dev->type && dev->type->pm) {
927 		info = "type ";
928 		callback = pm_op(dev->type->pm, state);
929 		goto Driver;
930 	}
931 
932 	if (dev->class && dev->class->pm) {
933 		info = "class ";
934 		callback = pm_op(dev->class->pm, state);
935 		goto Driver;
936 	}
937 
938 	if (dev->bus) {
939 		if (dev->bus->pm) {
940 			info = "bus ";
941 			callback = pm_op(dev->bus->pm, state);
942 		} else if (dev->bus->resume) {
943 			info = "legacy bus ";
944 			callback = dev->bus->resume;
945 			goto End;
946 		}
947 	}
948 
949  Driver:
950 	if (!callback && dev->driver && dev->driver->pm) {
951 		info = "driver ";
952 		callback = pm_op(dev->driver->pm, state);
953 	}
954 
955  End:
956 	error = dpm_run_callback(callback, dev, state, info);
957 
958 	device_unlock(dev);
959 	dpm_watchdog_clear(&wd);
960 
961  Complete:
962 	complete_all(&dev->power.completion);
963 
964 	TRACE_RESUME(error);
965 
966 	if (error) {
967 		suspend_stats.failed_resume++;
968 		dpm_save_failed_step(SUSPEND_RESUME);
969 		dpm_save_failed_dev(dev_name(dev));
970 		pm_dev_err(dev, state, async ? " async" : "", error);
971 	}
972 }
973 
974 static void async_resume(void *data, async_cookie_t cookie)
975 {
976 	struct device *dev = data;
977 
978 	__device_resume(dev, pm_transition, true);
979 	put_device(dev);
980 }
981 
982 static void device_resume(struct device *dev)
983 {
984 	if (dpm_async_fn(dev, async_resume))
985 		return;
986 
987 	__device_resume(dev, pm_transition, false);
988 }
989 
990 /**
991  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
992  * @state: PM transition of the system being carried out.
993  *
994  * Execute the appropriate "resume" callback for all devices whose status
995  * indicates that they are suspended.
996  */
997 void dpm_resume(pm_message_t state)
998 {
999 	struct device *dev;
1000 	ktime_t starttime = ktime_get();
1001 
1002 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1003 	might_sleep();
1004 
1005 	mutex_lock(&dpm_list_mtx);
1006 	pm_transition = state;
1007 	async_error = 0;
1008 
1009 	while (!list_empty(&dpm_suspended_list)) {
1010 		dev = to_device(dpm_suspended_list.next);
1011 
1012 		get_device(dev);
1013 
1014 		mutex_unlock(&dpm_list_mtx);
1015 
1016 		device_resume(dev);
1017 
1018 		mutex_lock(&dpm_list_mtx);
1019 
1020 		if (!list_empty(&dev->power.entry))
1021 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1022 
1023 		mutex_unlock(&dpm_list_mtx);
1024 
1025 		put_device(dev);
1026 
1027 		mutex_lock(&dpm_list_mtx);
1028 	}
1029 	mutex_unlock(&dpm_list_mtx);
1030 	async_synchronize_full();
1031 	dpm_show_time(starttime, state, 0, NULL);
1032 
1033 	cpufreq_resume();
1034 	devfreq_resume();
1035 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1036 }
1037 
1038 /**
1039  * device_complete - Complete a PM transition for given device.
1040  * @dev: Device to handle.
1041  * @state: PM transition of the system being carried out.
1042  */
1043 static void device_complete(struct device *dev, pm_message_t state)
1044 {
1045 	void (*callback)(struct device *) = NULL;
1046 	const char *info = NULL;
1047 
1048 	if (dev->power.syscore)
1049 		goto out;
1050 
1051 	device_lock(dev);
1052 
1053 	if (dev->pm_domain) {
1054 		info = "completing power domain ";
1055 		callback = dev->pm_domain->ops.complete;
1056 	} else if (dev->type && dev->type->pm) {
1057 		info = "completing type ";
1058 		callback = dev->type->pm->complete;
1059 	} else if (dev->class && dev->class->pm) {
1060 		info = "completing class ";
1061 		callback = dev->class->pm->complete;
1062 	} else if (dev->bus && dev->bus->pm) {
1063 		info = "completing bus ";
1064 		callback = dev->bus->pm->complete;
1065 	}
1066 
1067 	if (!callback && dev->driver && dev->driver->pm) {
1068 		info = "completing driver ";
1069 		callback = dev->driver->pm->complete;
1070 	}
1071 
1072 	if (callback) {
1073 		pm_dev_dbg(dev, state, info);
1074 		callback(dev);
1075 	}
1076 
1077 	device_unlock(dev);
1078 
1079 out:
1080 	pm_runtime_put(dev);
1081 }
1082 
1083 /**
1084  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1085  * @state: PM transition of the system being carried out.
1086  *
1087  * Execute the ->complete() callbacks for all devices whose PM status is not
1088  * DPM_ON (this allows new devices to be registered).
1089  */
1090 void dpm_complete(pm_message_t state)
1091 {
1092 	struct list_head list;
1093 
1094 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1095 	might_sleep();
1096 
1097 	INIT_LIST_HEAD(&list);
1098 	mutex_lock(&dpm_list_mtx);
1099 	while (!list_empty(&dpm_prepared_list)) {
1100 		struct device *dev = to_device(dpm_prepared_list.prev);
1101 
1102 		get_device(dev);
1103 		dev->power.is_prepared = false;
1104 		list_move(&dev->power.entry, &list);
1105 
1106 		mutex_unlock(&dpm_list_mtx);
1107 
1108 		trace_device_pm_callback_start(dev, "", state.event);
1109 		device_complete(dev, state);
1110 		trace_device_pm_callback_end(dev, 0);
1111 
1112 		put_device(dev);
1113 
1114 		mutex_lock(&dpm_list_mtx);
1115 	}
1116 	list_splice(&list, &dpm_list);
1117 	mutex_unlock(&dpm_list_mtx);
1118 
1119 	/* Allow device probing and trigger re-probing of deferred devices */
1120 	device_unblock_probing();
1121 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1122 }
1123 
1124 /**
1125  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1126  * @state: PM transition of the system being carried out.
1127  *
1128  * Execute "resume" callbacks for all devices and complete the PM transition of
1129  * the system.
1130  */
1131 void dpm_resume_end(pm_message_t state)
1132 {
1133 	dpm_resume(state);
1134 	dpm_complete(state);
1135 }
1136 EXPORT_SYMBOL_GPL(dpm_resume_end);
1137 
1138 
1139 /*------------------------- Suspend routines -------------------------*/
1140 
1141 /**
1142  * resume_event - Return a "resume" message for given "suspend" sleep state.
1143  * @sleep_state: PM message representing a sleep state.
1144  *
1145  * Return a PM message representing the resume event corresponding to given
1146  * sleep state.
1147  */
1148 static pm_message_t resume_event(pm_message_t sleep_state)
1149 {
1150 	switch (sleep_state.event) {
1151 	case PM_EVENT_SUSPEND:
1152 		return PMSG_RESUME;
1153 	case PM_EVENT_FREEZE:
1154 	case PM_EVENT_QUIESCE:
1155 		return PMSG_RECOVER;
1156 	case PM_EVENT_HIBERNATE:
1157 		return PMSG_RESTORE;
1158 	}
1159 	return PMSG_ON;
1160 }
1161 
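/*
 * dpm_superior_set_must_resume - Propagate power.must_resume to superiors.
 *
 * If @dev must be resumed during the subsequent system resume, so must its
 * parent and all of its suppliers.
 */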
1162 static void dpm_superior_set_must_resume(struct device *dev)
1163 {
1164 	struct device_link *link;
1165 	int idx;
1166 
1167 	if (dev->parent)
1168 		dev->parent->power.must_resume = true;
1169 
1170 	idx = device_links_read_lock();
1171 
1172 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1173 		link->supplier->power.must_resume = true;
1174 
1175 	device_links_read_unlock(idx);
1176 }
1177 
1178 /**
1179  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1180  * @dev: Device to handle.
1181  * @state: PM transition of the system being carried out.
1182  * @async: If true, the device is being suspended asynchronously.
1183  *
1184  * The driver of @dev will not receive interrupts while this function is being
1185  * executed.
1186  */
1187 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1188 {
1189 	pm_callback_t callback = NULL;
1190 	const char *info = NULL;
1191 	int error = 0;
1192 
1193 	TRACE_DEVICE(dev);
1194 	TRACE_SUSPEND(0);
1195 
1196 	dpm_wait_for_subordinate(dev, async);
1197 
1198 	if (async_error)
1199 		goto Complete;
1200 
1201 	if (dev->power.syscore || dev->power.direct_complete)
1202 		goto Complete;
1203 
1204 	if (dev->pm_domain) {
1205 		info = "noirq power domain ";
1206 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1207 	} else if (dev->type && dev->type->pm) {
1208 		info = "noirq type ";
1209 		callback = pm_noirq_op(dev->type->pm, state);
1210 	} else if (dev->class && dev->class->pm) {
1211 		info = "noirq class ";
1212 		callback = pm_noirq_op(dev->class->pm, state);
1213 	} else if (dev->bus && dev->bus->pm) {
1214 		info = "noirq bus ";
1215 		callback = pm_noirq_op(dev->bus->pm, state);
1216 	}
1217 	if (callback)
1218 		goto Run;
1219 
1220 	if (dev_pm_skip_suspend(dev))
1221 		goto Skip;
1222 
1223 	if (dev->driver && dev->driver->pm) {
1224 		info = "noirq driver ";
1225 		callback = pm_noirq_op(dev->driver->pm, state);
1226 	}
1227 
1228 Run:
1229 	error = dpm_run_callback(callback, dev, state, info);
1230 	if (error) {
1231 		async_error = error;
1232 		goto Complete;
1233 	}
1234 
1235 Skip:
1236 	dev->power.is_noirq_suspended = true;
1237 
1238 	/*
1239 	 * Devices must be resumed unless they are explicitly allowed to be left
1240 	 * in suspend, but even in that case skipping the resume of devices that
1241 	 * were in use right before the system suspend (as indicated by their
1242 	 * runtime PM usage counters and child counters) would be suboptimal.
1243 	 */
1244 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1245 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1246 		dev->power.must_resume = true;
1247 
1248 	if (dev->power.must_resume)
1249 		dpm_superior_set_must_resume(dev);
1250 
1251 Complete:
1252 	complete_all(&dev->power.completion);
1253 	TRACE_SUSPEND(error);
1254 	return error;
1255 }
1256 
1257 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1258 {
1259 	struct device *dev = data;
1260 	int error;
1261 
1262 	error = __device_suspend_noirq(dev, pm_transition, true);
1263 	if (error) {
1264 		dpm_save_failed_dev(dev_name(dev));
1265 		pm_dev_err(dev, pm_transition, " async", error);
1266 	}
1267 
1268 	put_device(dev);
1269 }
1270 
1271 static int device_suspend_noirq(struct device *dev)
1272 {
1273 	if (dpm_async_fn(dev, async_suspend_noirq))
1274 		return 0;
1275 
1276 	return __device_suspend_noirq(dev, pm_transition, false);
1277 }
1278 
1279 static int dpm_noirq_suspend_devices(pm_message_t state)
1280 {
1281 	ktime_t starttime = ktime_get();
1282 	int error = 0;
1283 
1284 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1285 	mutex_lock(&dpm_list_mtx);
1286 	pm_transition = state;
1287 	async_error = 0;
1288 
1289 	while (!list_empty(&dpm_late_early_list)) {
1290 		struct device *dev = to_device(dpm_late_early_list.prev);
1291 
1292 		get_device(dev);
1293 		mutex_unlock(&dpm_list_mtx);
1294 
1295 		error = device_suspend_noirq(dev);
1296 
1297 		mutex_lock(&dpm_list_mtx);
1298 
1299 		if (error) {
1300 			pm_dev_err(dev, state, " noirq", error);
1301 			dpm_save_failed_dev(dev_name(dev));
1302 		} else if (!list_empty(&dev->power.entry)) {
1303 			list_move(&dev->power.entry, &dpm_noirq_list);
1304 		}
1305 
1306 		mutex_unlock(&dpm_list_mtx);
1307 
1308 		put_device(dev);
1309 
1310 		mutex_lock(&dpm_list_mtx);
1311 
1312 		if (error || async_error)
1313 			break;
1314 	}
1315 	mutex_unlock(&dpm_list_mtx);
1316 	async_synchronize_full();
1317 	if (!error)
1318 		error = async_error;
1319 
1320 	if (error) {
1321 		suspend_stats.failed_suspend_noirq++;
1322 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1323 	}
1324 	dpm_show_time(starttime, state, error, "noirq");
1325 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1326 	return error;
1327 }
1328 
1329 /**
1330  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1331  * @state: PM transition of the system being carried out.
1332  *
1333  * Prevent device drivers' interrupt handlers from being called and invoke
1334  * "noirq" suspend callbacks for all non-sysdev devices.
1335  */
1336 int dpm_suspend_noirq(pm_message_t state)
1337 {
1338 	int ret;
1339 
1340 	device_wakeup_arm_wake_irqs();
1341 	suspend_device_irqs();
1342 
1343 	ret = dpm_noirq_suspend_devices(state);
1344 	if (ret)
1345 		dpm_resume_noirq(resume_event(state));
1346 
1347 	return ret;
1348 }
1349 
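/*
 * dpm_propagate_wakeup_to_parent - Propagate the wakeup_path flag upwards.
 *
 * If @dev is part of a wakeup path, mark its parent as being part of that
 * path too, unless the parent ignores its children.
 */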
1350 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1351 {
1352 	struct device *parent = dev->parent;
1353 
1354 	if (!parent)
1355 		return;
1356 
1357 	spin_lock_irq(&parent->power.lock);
1358 
1359 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1360 		parent->power.wakeup_path = true;
1361 
1362 	spin_unlock_irq(&parent->power.lock);
1363 }
1364 
1365 /**
1366  * __device_suspend_late - Execute a "late suspend" callback for given device.
1367  * @dev: Device to handle.
1368  * @state: PM transition of the system being carried out.
1369  * @async: If true, the device is being suspended asynchronously.
1370  *
1371  * Runtime PM is disabled for @dev while this function is being executed.
1372  */
1373 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1374 {
1375 	pm_callback_t callback = NULL;
1376 	const char *info = NULL;
1377 	int error = 0;
1378 
1379 	TRACE_DEVICE(dev);
1380 	TRACE_SUSPEND(0);
1381 
1382 	__pm_runtime_disable(dev, false);
1383 
1384 	dpm_wait_for_subordinate(dev, async);
1385 
1386 	if (async_error)
1387 		goto Complete;
1388 
1389 	if (pm_wakeup_pending()) {
1390 		async_error = -EBUSY;
1391 		goto Complete;
1392 	}
1393 
1394 	if (dev->power.syscore || dev->power.direct_complete)
1395 		goto Complete;
1396 
1397 	if (dev->pm_domain) {
1398 		info = "late power domain ";
1399 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1400 	} else if (dev->type && dev->type->pm) {
1401 		info = "late type ";
1402 		callback = pm_late_early_op(dev->type->pm, state);
1403 	} else if (dev->class && dev->class->pm) {
1404 		info = "late class ";
1405 		callback = pm_late_early_op(dev->class->pm, state);
1406 	} else if (dev->bus && dev->bus->pm) {
1407 		info = "late bus ";
1408 		callback = pm_late_early_op(dev->bus->pm, state);
1409 	}
1410 	if (callback)
1411 		goto Run;
1412 
1413 	if (dev_pm_skip_suspend(dev))
1414 		goto Skip;
1415 
1416 	if (dev->driver && dev->driver->pm) {
1417 		info = "late driver ";
1418 		callback = pm_late_early_op(dev->driver->pm, state);
1419 	}
1420 
1421 Run:
1422 	error = dpm_run_callback(callback, dev, state, info);
1423 	if (error) {
1424 		async_error = error;
1425 		goto Complete;
1426 	}
1427 	dpm_propagate_wakeup_to_parent(dev);
1428 
1429 Skip:
1430 	dev->power.is_late_suspended = true;
1431 
1432 Complete:
1433 	TRACE_SUSPEND(error);
1434 	complete_all(&dev->power.completion);
1435 	return error;
1436 }
1437 
1438 static void async_suspend_late(void *data, async_cookie_t cookie)
1439 {
1440 	struct device *dev = data;
1441 	int error;
1442 
1443 	error = __device_suspend_late(dev, pm_transition, true);
1444 	if (error) {
1445 		dpm_save_failed_dev(dev_name(dev));
1446 		pm_dev_err(dev, pm_transition, " async", error);
1447 	}
1448 	put_device(dev);
1449 }
1450 
1451 static int device_suspend_late(struct device *dev)
1452 {
1453 	if (dpm_async_fn(dev, async_suspend_late))
1454 		return 0;
1455 
1456 	return __device_suspend_late(dev, pm_transition, false);
1457 }
1458 
1459 /**
1460  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1461  * @state: PM transition of the system being carried out.
1462  */
1463 int dpm_suspend_late(pm_message_t state)
1464 {
1465 	ktime_t starttime = ktime_get();
1466 	int error = 0;
1467 
1468 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1469 	wake_up_all_idle_cpus();
1470 	mutex_lock(&dpm_list_mtx);
1471 	pm_transition = state;
1472 	async_error = 0;
1473 
1474 	while (!list_empty(&dpm_suspended_list)) {
1475 		struct device *dev = to_device(dpm_suspended_list.prev);
1476 
1477 		get_device(dev);
1478 
1479 		mutex_unlock(&dpm_list_mtx);
1480 
1481 		error = device_suspend_late(dev);
1482 
1483 		mutex_lock(&dpm_list_mtx);
1484 
1485 		if (!list_empty(&dev->power.entry))
1486 			list_move(&dev->power.entry, &dpm_late_early_list);
1487 
1488 		if (error) {
1489 			pm_dev_err(dev, state, " late", error);
1490 			dpm_save_failed_dev(dev_name(dev));
1491 		}
1492 
1493 		mutex_unlock(&dpm_list_mtx);
1494 
1495 		put_device(dev);
1496 
1497 		mutex_lock(&dpm_list_mtx);
1498 
1499 		if (error || async_error)
1500 			break;
1501 	}
1502 	mutex_unlock(&dpm_list_mtx);
1503 	async_synchronize_full();
1504 	if (!error)
1505 		error = async_error;
1506 	if (error) {
1507 		suspend_stats.failed_suspend_late++;
1508 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1509 		dpm_resume_early(resume_event(state));
1510 	}
1511 	dpm_show_time(starttime, state, error, "late");
1512 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1513 	return error;
1514 }
1515 
1516 /**
1517  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1518  * @state: PM transition of the system being carried out.
1519  */
1520 int dpm_suspend_end(pm_message_t state)
1521 {
1522 	ktime_t starttime = ktime_get();
1523 	int error;
1524 
1525 	error = dpm_suspend_late(state);
1526 	if (error)
1527 		goto out;
1528 
1529 	error = dpm_suspend_noirq(state);
1530 	if (error)
1531 		dpm_resume_early(resume_event(state));
1532 
1533 out:
1534 	dpm_show_time(starttime, state, error, "end");
1535 	return error;
1536 }
1537 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1538 
1539 /**
1540  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1541  * @dev: Device to suspend.
1542  * @state: PM transition of the system being carried out.
1543  * @cb: Suspend callback to execute.
1544  * @info: string description of caller.
1545  */
1546 static int legacy_suspend(struct device *dev, pm_message_t state,
1547 			  int (*cb)(struct device *dev, pm_message_t state),
1548 			  const char *info)
1549 {
1550 	int error;
1551 	ktime_t calltime;
1552 
1553 	calltime = initcall_debug_start(dev, cb);
1554 
1555 	trace_device_pm_callback_start(dev, info, state.event);
1556 	error = cb(dev, state);
1557 	trace_device_pm_callback_end(dev, error);
1558 	suspend_report_result(dev, cb, error);
1559 
1560 	initcall_debug_report(dev, calltime, cb, error);
1561 
1562 	return error;
1563 }
1564 
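/*
 * dpm_clear_superiors_direct_complete - Clear direct_complete for superiors.
 *
 * Since @dev is going through full suspend and resume callbacks, its parent
 * and suppliers cannot be left runtime-suspended via the direct_complete
 * optimization, so clear that flag for them.
 */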
1565 static void dpm_clear_superiors_direct_complete(struct device *dev)
1566 {
1567 	struct device_link *link;
1568 	int idx;
1569 
1570 	if (dev->parent) {
1571 		spin_lock_irq(&dev->parent->power.lock);
1572 		dev->parent->power.direct_complete = false;
1573 		spin_unlock_irq(&dev->parent->power.lock);
1574 	}
1575 
1576 	idx = device_links_read_lock();
1577 
1578 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1579 		spin_lock_irq(&link->supplier->power.lock);
1580 		link->supplier->power.direct_complete = false;
1581 		spin_unlock_irq(&link->supplier->power.lock);
1582 	}
1583 
1584 	device_links_read_unlock(idx);
1585 }
1586 
1587 /**
1588  * __device_suspend - Execute "suspend" callbacks for given device.
1589  * @dev: Device to handle.
1590  * @state: PM transition of the system being carried out.
1591  * @async: If true, the device is being suspended asynchronously.
1592  */
1593 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1594 {
1595 	pm_callback_t callback = NULL;
1596 	const char *info = NULL;
1597 	int error = 0;
1598 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1599 
1600 	TRACE_DEVICE(dev);
1601 	TRACE_SUSPEND(0);
1602 
1603 	dpm_wait_for_subordinate(dev, async);
1604 
1605 	if (async_error) {
1606 		dev->power.direct_complete = false;
1607 		goto Complete;
1608 	}
1609 
1610 	/*
1611 	 * Wait for possible runtime PM transitions of the device in progress
1612 	 * to complete and if there's a runtime resume request pending for it,
1613 	 * resume it before proceeding with invoking the system-wide suspend
1614 	 * callbacks for it.
1615 	 *
1616 	 * If the system-wide suspend callbacks below change the configuration
1617 	 * of the device, they must disable runtime PM for it or otherwise
1618 	 * ensure that its runtime-resume callbacks will not be confused by that
1619 	 * change in case they are invoked going forward.
1620 	 */
1621 	pm_runtime_barrier(dev);
1622 
1623 	if (pm_wakeup_pending()) {
1624 		dev->power.direct_complete = false;
1625 		async_error = -EBUSY;
1626 		goto Complete;
1627 	}
1628 
1629 	if (dev->power.syscore)
1630 		goto Complete;
1631 
1632 	/* Avoid direct_complete to let wakeup_path propagate. */
1633 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1634 		dev->power.direct_complete = false;
1635 
1636 	if (dev->power.direct_complete) {
1637 		if (pm_runtime_status_suspended(dev)) {
1638 			pm_runtime_disable(dev);
1639 			if (pm_runtime_status_suspended(dev)) {
1640 				pm_dev_dbg(dev, state, "direct-complete ");
1641 				dev->power.is_suspended = true;
1642 				goto Complete;
1643 			}
1644 
1645 			pm_runtime_enable(dev);
1646 		}
1647 		dev->power.direct_complete = false;
1648 	}
1649 
1650 	dev->power.may_skip_resume = true;
1651 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1652 
1653 	dpm_watchdog_set(&wd, dev);
1654 	device_lock(dev);
1655 
1656 	if (dev->pm_domain) {
1657 		info = "power domain ";
1658 		callback = pm_op(&dev->pm_domain->ops, state);
1659 		goto Run;
1660 	}
1661 
1662 	if (dev->type && dev->type->pm) {
1663 		info = "type ";
1664 		callback = pm_op(dev->type->pm, state);
1665 		goto Run;
1666 	}
1667 
1668 	if (dev->class && dev->class->pm) {
1669 		info = "class ";
1670 		callback = pm_op(dev->class->pm, state);
1671 		goto Run;
1672 	}
1673 
1674 	if (dev->bus) {
1675 		if (dev->bus->pm) {
1676 			info = "bus ";
1677 			callback = pm_op(dev->bus->pm, state);
1678 		} else if (dev->bus->suspend) {
1679 			pm_dev_dbg(dev, state, "legacy bus ");
1680 			error = legacy_suspend(dev, state, dev->bus->suspend,
1681 						"legacy bus ");
1682 			goto End;
1683 		}
1684 	}
1685 
1686  Run:
1687 	if (!callback && dev->driver && dev->driver->pm) {
1688 		info = "driver ";
1689 		callback = pm_op(dev->driver->pm, state);
1690 	}
1691 
1692 	error = dpm_run_callback(callback, dev, state, info);
1693 
1694  End:
1695 	if (!error) {
1696 		dev->power.is_suspended = true;
1697 		if (device_may_wakeup(dev))
1698 			dev->power.wakeup_path = true;
1699 
1700 		dpm_propagate_wakeup_to_parent(dev);
1701 		dpm_clear_superiors_direct_complete(dev);
1702 	}
1703 
1704 	device_unlock(dev);
1705 	dpm_watchdog_clear(&wd);
1706 
1707  Complete:
1708 	if (error)
1709 		async_error = error;
1710 
1711 	complete_all(&dev->power.completion);
1712 	TRACE_SUSPEND(error);
1713 	return error;
1714 }
1715 
1716 static void async_suspend(void *data, async_cookie_t cookie)
1717 {
1718 	struct device *dev = data;
1719 	int error;
1720 
1721 	error = __device_suspend(dev, pm_transition, true);
1722 	if (error) {
1723 		dpm_save_failed_dev(dev_name(dev));
1724 		pm_dev_err(dev, pm_transition, " async", error);
1725 	}
1726 
1727 	put_device(dev);
1728 }
1729 
1730 static int device_suspend(struct device *dev)
1731 {
1732 	if (dpm_async_fn(dev, async_suspend))
1733 		return 0;
1734 
1735 	return __device_suspend(dev, pm_transition, false);
1736 }
1737 
1738 /**
1739  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1740  * @state: PM transition of the system being carried out.
1741  */
1742 int dpm_suspend(pm_message_t state)
1743 {
1744 	ktime_t starttime = ktime_get();
1745 	int error = 0;
1746 
1747 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1748 	might_sleep();
1749 
1750 	devfreq_suspend();
1751 	cpufreq_suspend();
1752 
1753 	mutex_lock(&dpm_list_mtx);
1754 	pm_transition = state;
1755 	async_error = 0;
1756 	while (!list_empty(&dpm_prepared_list)) {
1757 		struct device *dev = to_device(dpm_prepared_list.prev);
1758 
1759 		get_device(dev);
1760 
1761 		mutex_unlock(&dpm_list_mtx);
1762 
1763 		error = device_suspend(dev);
1764 
1765 		mutex_lock(&dpm_list_mtx);
1766 
1767 		if (error) {
1768 			pm_dev_err(dev, state, "", error);
1769 			dpm_save_failed_dev(dev_name(dev));
1770 		} else if (!list_empty(&dev->power.entry)) {
1771 			list_move(&dev->power.entry, &dpm_suspended_list);
1772 		}
1773 
1774 		mutex_unlock(&dpm_list_mtx);
1775 
1776 		put_device(dev);
1777 
1778 		mutex_lock(&dpm_list_mtx);
1779 
1780 		if (error || async_error)
1781 			break;
1782 	}
1783 	mutex_unlock(&dpm_list_mtx);
1784 	async_synchronize_full();
1785 	if (!error)
1786 		error = async_error;
1787 	if (error) {
1788 		suspend_stats.failed_suspend++;
1789 		dpm_save_failed_step(SUSPEND_SUSPEND);
1790 	}
1791 	dpm_show_time(starttime, state, error, NULL);
1792 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1793 	return error;
1794 }
1795 
1796 /**
1797  * device_prepare - Prepare a device for system power transition.
1798  * @dev: Device to handle.
1799  * @state: PM transition of the system being carried out.
1800  *
1801  * Execute the ->prepare() callback(s) for given device.  No new children of the
1802  * device may be registered after this function has returned.
1803  */
1804 static int device_prepare(struct device *dev, pm_message_t state)
1805 {
1806 	int (*callback)(struct device *) = NULL;
1807 	int ret = 0;
1808 
1809 	/*
1810 	 * If a device's parent goes into runtime suspend at the wrong time,
1811 	 * it won't be possible to resume the device.  To prevent this we
1812 	 * block runtime suspend here, during the prepare phase, and allow
1813 	 * it again during the complete phase.
1814 	 */
1815 	pm_runtime_get_noresume(dev);
1816 
1817 	if (dev->power.syscore)
1818 		return 0;
1819 
1820 	device_lock(dev);
1821 
1822 	dev->power.wakeup_path = false;
1823 
1824 	if (dev->power.no_pm_callbacks)
1825 		goto unlock;
1826 
1827 	if (dev->pm_domain)
1828 		callback = dev->pm_domain->ops.prepare;
1829 	else if (dev->type && dev->type->pm)
1830 		callback = dev->type->pm->prepare;
1831 	else if (dev->class && dev->class->pm)
1832 		callback = dev->class->pm->prepare;
1833 	else if (dev->bus && dev->bus->pm)
1834 		callback = dev->bus->pm->prepare;
1835 
1836 	if (!callback && dev->driver && dev->driver->pm)
1837 		callback = dev->driver->pm->prepare;
1838 
1839 	if (callback)
1840 		ret = callback(dev);
1841 
1842 unlock:
1843 	device_unlock(dev);
1844 
1845 	if (ret < 0) {
1846 		suspend_report_result(dev, callback, ret);
1847 		pm_runtime_put(dev);
1848 		return ret;
1849 	}
1850 	/*
1851 	 * A positive return value from ->prepare() means "this device appears
1852 	 * to be runtime-suspended and its state is fine, so if it really is
1853 	 * runtime-suspended, you can leave it in that state provided that you
1854 	 * will do the same thing with all of its descendants".  This only
1855 	 * applies to suspend transitions, however.
1856 	 */
1857 	spin_lock_irq(&dev->power.lock);
1858 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1859 		(ret > 0 || dev->power.no_pm_callbacks) &&
1860 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1861 	spin_unlock_irq(&dev->power.lock);
1862 	return 0;
1863 }
1864 
1865 /**
1866  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1867  * @state: PM transition of the system being carried out.
1868  *
1869  * Execute the ->prepare() callback(s) for all devices.
1870  */
1871 int dpm_prepare(pm_message_t state)
1872 {
1873 	int error = 0;
1874 
1875 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1876 	might_sleep();
1877 
1878 	/*
1879 	 * Give the known devices a chance to complete their probes before
1880 	 * probing is disabled.  This sync point is important at least at boot
1881 	 * time and during hibernation restore.
1882 	 */
1883 	wait_for_device_probe();
1884 	/*
1885 	 * Probing devices during suspend or hibernation is unsafe and would
1886 	 * make system behavior unpredictable, so prohibit device probing here
1887 	 * and defer the probes instead.  The normal behavior will be restored
1888 	 * in dpm_complete().
1889 	 */
1890 	device_block_probing();
1891 
1892 	mutex_lock(&dpm_list_mtx);
1893 	while (!list_empty(&dpm_list) && !error) {
1894 		struct device *dev = to_device(dpm_list.next);
1895 
1896 		get_device(dev);
1897 
1898 		mutex_unlock(&dpm_list_mtx);
1899 
1900 		trace_device_pm_callback_start(dev, "", state.event);
1901 		error = device_prepare(dev, state);
1902 		trace_device_pm_callback_end(dev, error);
1903 
1904 		mutex_lock(&dpm_list_mtx);
1905 
1906 		if (!error) {
1907 			dev->power.is_prepared = true;
1908 			if (!list_empty(&dev->power.entry))
1909 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
1910 		} else if (error == -EAGAIN) {
1911 			error = 0;
1912 		} else {
1913 			dev_info(dev, "not prepared for power transition: code %d\n",
1914 				 error);
1915 		}
1916 
1917 		mutex_unlock(&dpm_list_mtx);
1918 
1919 		put_device(dev);
1920 
1921 		mutex_lock(&dpm_list_mtx);
1922 	}
1923 	mutex_unlock(&dpm_list_mtx);
1924 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1925 	return error;
1926 }
1927 
1928 /**
1929  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1930  * @state: PM transition of the system being carried out.
1931  *
1932  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1933  * callbacks for them.
1934  */
1935 int dpm_suspend_start(pm_message_t state)
1936 {
1937 	ktime_t starttime = ktime_get();
1938 	int error;
1939 
1940 	error = dpm_prepare(state);
1941 	if (error) {
1942 		suspend_stats.failed_prepare++;
1943 		dpm_save_failed_step(SUSPEND_PREPARE);
1944 	} else
1945 		error = dpm_suspend(state);
1946 	dpm_show_time(starttime, state, error, "start");
1947 	return error;
1948 }
1949 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1950 
1951 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1952 {
1953 	if (ret)
1954 		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1955 }
1956 EXPORT_SYMBOL_GPL(__suspend_report_result);
1957 
1958 /**
1959  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1960  * @subordinate: Device that needs to wait for @dev.
1961  * @dev: Device to wait for.
1962  */
1963 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1964 {
1965 	dpm_wait(dev, subordinate->power.async_suspend);
1966 	return async_error;
1967 }
1968 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1969 
1970 /**
1971  * dpm_for_each_dev - device iterator.
1972  * @data: data for the callback.
1973  * @fn: function to be called for each device.
1974  *
1975  * Iterate over devices in dpm_list, and call @fn for each device,
1976  * passing it @data.
1977  */
1978 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1979 {
1980 	struct device *dev;
1981 
1982 	if (!fn)
1983 		return;
1984 
1985 	device_pm_lock();
1986 	list_for_each_entry(dev, &dpm_list, power.entry)
1987 		fn(dev, data);
1988 	device_pm_unlock();
1989 }
1990 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1991 
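/*
 * pm_ops_is_empty - Check whether a dev_pm_ops object provides no system
 * sleep callbacks (runtime PM callbacks are not taken into account).
 */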
1992 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1993 {
1994 	if (!ops)
1995 		return true;
1996 
1997 	return !ops->prepare &&
1998 	       !ops->suspend &&
1999 	       !ops->suspend_late &&
2000 	       !ops->suspend_noirq &&
2001 	       !ops->resume_noirq &&
2002 	       !ops->resume_early &&
2003 	       !ops->resume &&
2004 	       !ops->complete;
2005 }
2006 
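/*
 * device_pm_check_callbacks - Cache whether @dev has any PM callbacks.
 *
 * Set power.no_pm_callbacks if none of the device's bus, class, type, PM
 * domain or driver provides system sleep callbacks, so device_prepare() can
 * skip the callback lookup and the device can take the direct_complete path
 * during suspend.
 */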
2007 void device_pm_check_callbacks(struct device *dev)
2008 {
2009 	unsigned long flags;
2010 
2011 	spin_lock_irqsave(&dev->power.lock, flags);
2012 	dev->power.no_pm_callbacks =
2013 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2014 		 !dev->bus->suspend && !dev->bus->resume)) &&
2015 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2016 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2017 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2018 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2019 		 !dev->driver->suspend && !dev->driver->resume));
2020 	spin_unlock_irqrestore(&dev->power.lock, flags);
2021 }
2022 
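/*
 * dev_pm_skip_suspend - May the "late" and "noirq" suspend callbacks of @dev
 * be skipped?
 *
 * Return true if the driver has set DPM_FLAG_SMART_SUSPEND and the device is
 * already runtime-suspended.
 */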
2023 bool dev_pm_skip_suspend(struct device *dev)
2024 {
2025 	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2026 		pm_runtime_status_suspended(dev);
2027 }
2028