Lines matching refs: dev — Linux kernel device power management core (drivers/base/power/main.c). Each entry shows the source line, the matching code, and the enclosing function.

97 void device_pm_sleep_init(struct device *dev) in device_pm_sleep_init() argument
99 dev->power.is_prepared = false; in device_pm_sleep_init()
100 dev->power.is_suspended = false; in device_pm_sleep_init()
101 dev->power.is_noirq_suspended = false; in device_pm_sleep_init()
102 dev->power.is_late_suspended = false; in device_pm_sleep_init()
103 init_completion(&dev->power.completion); in device_pm_sleep_init()
104 complete_all(&dev->power.completion); in device_pm_sleep_init()
105 dev->power.wakeup = NULL; in device_pm_sleep_init()
106 INIT_LIST_HEAD(&dev->power.entry); in device_pm_sleep_init()
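
The matches above cover device_pm_sleep_init(): every sleep-state flag starts out false, and the completion is initialized and then immediately completed, so a waiter that arrives before the first suspend/resume cycle returns at once. A minimal userspace sketch of that pre-completed-completion idea follows; the types and helpers are stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct completion { bool done; };          /* stand-in for struct completion */

static void init_completion(struct completion *c) { c->done = false; }
static void complete_all(struct completion *c)    { c->done = true; }
static void wait_for_completion(struct completion *c)
{
    while (!c->done)
        ;                                  /* real code sleeps; model spins */
}

struct dev_pm_model {
    bool is_prepared, is_suspended, is_noirq_suspended, is_late_suspended;
    struct completion completion;
};

static void device_pm_sleep_init_model(struct dev_pm_model *p)
{
    p->is_prepared = p->is_suspended = false;
    p->is_noirq_suspended = p->is_late_suspended = false;
    init_completion(&p->completion);
    complete_all(&p->completion);          /* pre-complete: no early blocking */
}

int main(void)
{
    struct dev_pm_model m;
    device_pm_sleep_init_model(&m);
    wait_for_completion(&m.completion);    /* returns immediately */
    puts("no deadlock before first transition");
    return 0;
}
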
129 void device_pm_add(struct device *dev) in device_pm_add() argument
132 if (device_pm_not_required(dev)) in device_pm_add()
136 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_add()
137 device_pm_check_callbacks(dev); in device_pm_add()
139 if (dev->parent && dev->parent->power.is_prepared) in device_pm_add()
140 dev_warn(dev, "parent %s should not be sleeping\n", in device_pm_add()
141 dev_name(dev->parent)); in device_pm_add()
142 list_add_tail(&dev->power.entry, &dpm_list); in device_pm_add()
143 dev->power.in_dpm_list = true; in device_pm_add()
151 void device_pm_remove(struct device *dev) in device_pm_remove() argument
153 if (device_pm_not_required(dev)) in device_pm_remove()
157 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_remove()
158 complete_all(&dev->power.completion); in device_pm_remove()
160 list_del_init(&dev->power.entry); in device_pm_remove()
161 dev->power.in_dpm_list = false; in device_pm_remove()
163 device_wakeup_disable(dev); in device_pm_remove()
164 pm_runtime_remove(dev); in device_pm_remove()
165 device_pm_check_callbacks(dev); in device_pm_remove()
200 void device_pm_move_last(struct device *dev) in device_pm_move_last() argument
203 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_move_last()
204 list_move_tail(&dev->power.entry, &dpm_list); in device_pm_move_last()
207 static ktime_t initcall_debug_start(struct device *dev, void *cb) in initcall_debug_start() argument
212 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb, in initcall_debug_start()
214 dev->parent ? dev_name(dev->parent) : "none"); in initcall_debug_start()
218 static void initcall_debug_report(struct device *dev, ktime_t calltime, in initcall_debug_report() argument
227 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error, in initcall_debug_report()
236 static void dpm_wait(struct device *dev, bool async) in dpm_wait() argument
238 if (!dev) in dpm_wait()
241 if (async || (pm_async_enabled && dev->power.async_suspend)) in dpm_wait()
242 wait_for_completion(&dev->power.completion); in dpm_wait()
245 static int dpm_wait_fn(struct device *dev, void *async_ptr) in dpm_wait_fn() argument
247 dpm_wait(dev, *((bool *)async_ptr)); in dpm_wait_fn()
251 static void dpm_wait_for_children(struct device *dev, bool async) in dpm_wait_for_children() argument
253 device_for_each_child(dev, &async, dpm_wait_fn); in dpm_wait_for_children()
256 static void dpm_wait_for_suppliers(struct device *dev, bool async) in dpm_wait_for_suppliers() argument
270 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) in dpm_wait_for_suppliers()
277 static bool dpm_wait_for_superior(struct device *dev, bool async) in dpm_wait_for_superior() argument
290 if (!device_pm_initialized(dev)) { in dpm_wait_for_superior()
295 parent = get_device(dev->parent); in dpm_wait_for_superior()
302 dpm_wait_for_suppliers(dev, async); in dpm_wait_for_superior()
308 return device_pm_initialized(dev); in dpm_wait_for_superior()
311 static void dpm_wait_for_consumers(struct device *dev, bool async) in dpm_wait_for_consumers() argument
327 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) in dpm_wait_for_consumers()
334 static void dpm_wait_for_subordinate(struct device *dev, bool async) in dpm_wait_for_subordinate() argument
336 dpm_wait_for_children(dev, async); in dpm_wait_for_subordinate()
337 dpm_wait_for_consumers(dev, async); in dpm_wait_for_subordinate()
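
Taken together, dpm_wait_for_superior() (parent plus suppliers) and dpm_wait_for_subordinate() (children plus consumers) encode the two directions of a transition: resume proceeds from the root of the device tree down, suspend from the leaves up. A toy model of those two invariants, assuming a single-child tree and plain booleans in place of the kernel's completions:

#include <stdbool.h>
#include <assert.h>
#include <stdio.h>

struct dev {
    struct dev *parent;       /* superior */
    struct dev *child;        /* single subordinate, for brevity */
    bool suspended;
};

static void suspend_one(struct dev *d)
{
    if (d->child)
        assert(d->child->suspended);   /* dpm_wait_for_subordinate() */
    d->suspended = true;
}

static void resume_one(struct dev *d)
{
    if (d->parent)
        assert(!d->parent->suspended); /* dpm_wait_for_superior() */
    d->suspended = false;
}

int main(void)
{
    struct dev root = { 0 }, leaf = { .parent = &root };
    root.child = &leaf;

    suspend_one(&leaf);  suspend_one(&root);   /* leaves first */
    resume_one(&root);   resume_one(&leaf);    /* root first */
    puts("ordering holds");
    return 0;
}
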
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) in pm_dev_dbg() argument
441 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event), in pm_dev_dbg()
442 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? in pm_dev_dbg()
443 ", may wakeup" : "", dev->power.driver_flags); in pm_dev_dbg()
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, in pm_dev_err() argument
449 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info, in pm_dev_err()
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev, in dpm_run_callback() argument
482 calltime = initcall_debug_start(dev, cb); in dpm_run_callback()
484 pm_dev_dbg(dev, state, info); in dpm_run_callback()
485 trace_device_pm_callback_start(dev, info, state.event); in dpm_run_callback()
486 error = cb(dev); in dpm_run_callback()
487 trace_device_pm_callback_end(dev, error); in dpm_run_callback()
488 suspend_report_result(dev, cb, error); in dpm_run_callback()
490 initcall_debug_report(dev, calltime, cb, error); in dpm_run_callback()
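
dpm_run_callback() is the single choke point through which every PM callback runs: optional initcall timing, a debug line, the tracepoints, and suspend_report_result() all wrap one indirect call. A rough userspace equivalent of the wrapper shape, with illustrative names rather than the kernel API:

#include <stdio.h>
#include <time.h>

typedef int (*pm_cb_t)(void *dev);

static int run_callback(pm_cb_t cb, void *dev, const char *info)
{
    struct timespec t0, t1;
    int error;

    if (!cb)
        return 0;                      /* no callback: trivially successful */

    clock_gettime(CLOCK_MONOTONIC, &t0);
    error = cb(dev);                   /* the actual ->suspend/->resume call */
    clock_gettime(CLOCK_MONOTONIC, &t1);

    long usecs = (t1.tv_sec - t0.tv_sec) * 1000000L +
                 (t1.tv_nsec - t0.tv_nsec) / 1000L;
    printf("%s returned %d after %ld usecs\n", info, error, usecs);
    return error;
}

static int demo_cb(void *dev) { (void)dev; return 0; }

int main(void)
{
    return run_callback(demo_cb, NULL, "demo_cb");
}
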
497 struct device *dev; member
517 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); in dpm_watchdog_handler()
520 dev_driver_string(wd->dev), dev_name(wd->dev)); in dpm_watchdog_handler()
528 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) in dpm_watchdog_set() argument
532 wd->dev = dev; in dpm_watchdog_set()
570 bool dev_pm_skip_resume(struct device *dev) in dev_pm_skip_resume() argument
576 return dev_pm_skip_suspend(dev); in dev_pm_skip_resume()
578 return !dev->power.must_resume; in dev_pm_skip_resume()
590 static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async) in __device_resume_noirq() argument
597 TRACE_DEVICE(dev); in __device_resume_noirq()
600 if (dev->power.syscore || dev->power.direct_complete) in __device_resume_noirq()
603 if (!dev->power.is_noirq_suspended) in __device_resume_noirq()
606 if (!dpm_wait_for_superior(dev, async)) in __device_resume_noirq()
609 skip_resume = dev_pm_skip_resume(dev); in __device_resume_noirq()
621 pm_runtime_set_suspended(dev); in __device_resume_noirq()
622 else if (dev_pm_skip_suspend(dev)) in __device_resume_noirq()
623 pm_runtime_set_active(dev); in __device_resume_noirq()
625 if (dev->pm_domain) { in __device_resume_noirq()
627 callback = pm_noirq_op(&dev->pm_domain->ops, state); in __device_resume_noirq()
628 } else if (dev->type && dev->type->pm) { in __device_resume_noirq()
630 callback = pm_noirq_op(dev->type->pm, state); in __device_resume_noirq()
631 } else if (dev->class && dev->class->pm) { in __device_resume_noirq()
633 callback = pm_noirq_op(dev->class->pm, state); in __device_resume_noirq()
634 } else if (dev->bus && dev->bus->pm) { in __device_resume_noirq()
636 callback = pm_noirq_op(dev->bus->pm, state); in __device_resume_noirq()
644 if (dev->driver && dev->driver->pm) { in __device_resume_noirq()
646 callback = pm_noirq_op(dev->driver->pm, state); in __device_resume_noirq()
650 error = dpm_run_callback(callback, dev, state, info); in __device_resume_noirq()
653 dev->power.is_noirq_suspended = false; in __device_resume_noirq()
656 complete_all(&dev->power.completion); in __device_resume_noirq()
662 dpm_save_failed_dev(dev_name(dev)); in __device_resume_noirq()
663 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); in __device_resume_noirq()
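
The else-if ladder above fixes the callback lookup order: a PM domain takes precedence over the device type, then the class, then the bus, and the chain stops at the first layer that exists even if that layer supplies no callback for this phase; the driver's own ops are consulted only when nothing else produced a callback. A compact sketch of that precedence rule, with hypothetical types:

#include <stdio.h>

typedef int (*pm_cb_t)(void);

struct layer { const char *name; pm_cb_t cb; };

static pm_cb_t pick_callback(struct layer *layers[], int n,
                             struct layer *driver, const char **who)
{
    pm_cb_t cb = NULL;

    *who = "none";
    for (int i = 0; i < n; i++)
        if (layers[i]) {               /* first layer that exists wins... */
            cb = layers[i]->cb;        /* ...even if its callback is NULL */
            *who = layers[i]->name;
            break;
        }
    if (!cb && driver && driver->cb) { /* driver is only a fallback */
        cb = driver->cb;
        *who = driver->name;
    }
    return cb;
}

static int bus_cb(void) { return 0; }
static int drv_cb(void) { return 0; }

int main(void)
{
    struct layer bus = { "bus", bus_cb }, drv = { "driver", drv_cb };
    struct layer *layers[] = { NULL /* pm_domain */, NULL /* type */,
                               NULL /* class */, &bus };
    const char *who;

    pick_callback(layers, 4, &drv, &who);
    printf("chose: %s\n", who);        /* "bus": driver never consulted */
    return 0;
}
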
667 static bool is_async(struct device *dev) in is_async() argument
669 return dev->power.async_suspend && pm_async_enabled in is_async()
673 static bool dpm_async_fn(struct device *dev, async_func_t func) in dpm_async_fn() argument
675 reinit_completion(&dev->power.completion); in dpm_async_fn()
677 if (!is_async(dev)) in dpm_async_fn()
680 get_device(dev); in dpm_async_fn()
682 if (async_schedule_dev_nocall(func, dev)) in dpm_async_fn()
685 put_device(dev); in dpm_async_fn()
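
dpm_async_fn() is the dispatch helper behind the device_resume_*()/device_suspend_*() wrappers: it re-arms the completion, and if the device qualifies for async handling it pins the device with get_device() before scheduling, dropping the reference again when scheduling fails so the caller can fall back to the synchronous path. A simplified single-threaded model; the async pool and refcounting below are stand-ins, and the sync fallback (done by the caller in the kernel) is folded in:

#include <stdbool.h>
#include <stdio.h>

struct dev { int refcount; };

static void get_device(struct dev *d) { d->refcount++; }
static void put_device(struct dev *d) { d->refcount--; }

static bool try_schedule_async(void (*fn)(struct dev *), struct dev *d)
{
    (void)fn; (void)d;
    return false;                      /* pretend the async pool is full */
}

static void resume_fn(struct dev *d) { (void)d; puts("resumed"); }

static void dispatch(struct dev *d, void (*fn)(struct dev *))
{
    get_device(d);                     /* ref held for the async worker */
    if (try_schedule_async(fn, d))
        return;                        /* worker will put_device() later */
    put_device(d);                     /* scheduling failed: undo the ref */
    fn(d);                             /* run synchronously instead */
}

int main(void)
{
    struct dev d = { .refcount = 1 };
    dispatch(&d, resume_fn);
    printf("refcount back to %d\n", d.refcount);
    return 0;
}
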
692 struct device *dev = data; in async_resume_noirq() local
694 __device_resume_noirq(dev, pm_transition, true); in async_resume_noirq()
695 put_device(dev); in async_resume_noirq()
698 static void device_resume_noirq(struct device *dev) in device_resume_noirq() argument
700 if (dpm_async_fn(dev, async_resume_noirq)) in device_resume_noirq()
703 __device_resume_noirq(dev, pm_transition, false); in device_resume_noirq()
708 struct device *dev; in dpm_noirq_resume_devices() local
716 dev = to_device(dpm_noirq_list.next); in dpm_noirq_resume_devices()
717 get_device(dev); in dpm_noirq_resume_devices()
718 list_move_tail(&dev->power.entry, &dpm_late_early_list); in dpm_noirq_resume_devices()
722 device_resume_noirq(dev); in dpm_noirq_resume_devices()
724 put_device(dev); in dpm_noirq_resume_devices()
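
dpm_noirq_resume_devices() shows the list-walk discipline shared by all the dpm_* loops: under dpm_list_mtx, take the next entry, pin it with get_device(), move it to the destination list, then drop the lock before invoking the (possibly sleeping) per-device work, and relock to release the reference. A compressed model of that pattern, with empty lock stubs standing in for the kernel mutex and a prepend where the kernel preserves order:

#include <stdio.h>

struct node { struct node *next; int ref; const char *name; };

static struct node *pending, *done_list;

static void lock(void)   { /* mutex_lock(&dpm_list_mtx) in the kernel */ }
static void unlock(void) { /* mutex_unlock(&dpm_list_mtx) */ }

static void process_all(void (*cb)(struct node *))
{
    lock();
    while (pending) {
        struct node *n = pending;
        n->ref++;                      /* get_device(): pin across unlock */
        pending = n->next;             /* list_move_tail() to the next list */
        n->next = done_list;
        done_list = n;
        unlock();
        cb(n);                         /* may sleep; lock must not be held */
        lock();
        n->ref--;                      /* put_device() */
    }
    unlock();
}

static void resume_cb(struct node *n) { printf("resume %s\n", n->name); }

int main(void)
{
    struct node b = { NULL, 1, "b" }, a = { &b, 1, "a" };
    pending = &a;
    process_all(resume_cb);
    return 0;
}
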
757 static void __device_resume_early(struct device *dev, pm_message_t state, bool async) in __device_resume_early() argument
763 TRACE_DEVICE(dev); in __device_resume_early()
766 if (dev->power.syscore || dev->power.direct_complete) in __device_resume_early()
769 if (!dev->power.is_late_suspended) in __device_resume_early()
772 if (!dpm_wait_for_superior(dev, async)) in __device_resume_early()
775 if (dev->pm_domain) { in __device_resume_early()
777 callback = pm_late_early_op(&dev->pm_domain->ops, state); in __device_resume_early()
778 } else if (dev->type && dev->type->pm) { in __device_resume_early()
780 callback = pm_late_early_op(dev->type->pm, state); in __device_resume_early()
781 } else if (dev->class && dev->class->pm) { in __device_resume_early()
783 callback = pm_late_early_op(dev->class->pm, state); in __device_resume_early()
784 } else if (dev->bus && dev->bus->pm) { in __device_resume_early()
786 callback = pm_late_early_op(dev->bus->pm, state); in __device_resume_early()
791 if (dev_pm_skip_resume(dev)) in __device_resume_early()
794 if (dev->driver && dev->driver->pm) { in __device_resume_early()
796 callback = pm_late_early_op(dev->driver->pm, state); in __device_resume_early()
800 error = dpm_run_callback(callback, dev, state, info); in __device_resume_early()
803 dev->power.is_late_suspended = false; in __device_resume_early()
808 pm_runtime_enable(dev); in __device_resume_early()
809 complete_all(&dev->power.completion); in __device_resume_early()
814 dpm_save_failed_dev(dev_name(dev)); in __device_resume_early()
815 pm_dev_err(dev, state, async ? " async early" : " early", error); in __device_resume_early()
821 struct device *dev = data; in async_resume_early() local
823 __device_resume_early(dev, pm_transition, true); in async_resume_early()
824 put_device(dev); in async_resume_early()
827 static void device_resume_early(struct device *dev) in device_resume_early() argument
829 if (dpm_async_fn(dev, async_resume_early)) in device_resume_early()
832 __device_resume_early(dev, pm_transition, false); in device_resume_early()
841 struct device *dev; in dpm_resume_early() local
849 dev = to_device(dpm_late_early_list.next); in dpm_resume_early()
850 get_device(dev); in dpm_resume_early()
851 list_move_tail(&dev->power.entry, &dpm_suspended_list); in dpm_resume_early()
855 device_resume_early(dev); in dpm_resume_early()
857 put_device(dev); in dpm_resume_early()
884 static void __device_resume(struct device *dev, pm_message_t state, bool async) in __device_resume() argument
891 TRACE_DEVICE(dev); in __device_resume()
894 if (dev->power.syscore) in __device_resume()
897 if (dev->power.direct_complete) { in __device_resume()
899 pm_runtime_enable(dev); in __device_resume()
903 if (!dpm_wait_for_superior(dev, async)) in __device_resume()
906 dpm_watchdog_set(&wd, dev); in __device_resume()
907 device_lock(dev); in __device_resume()
913 dev->power.is_prepared = false; in __device_resume()
915 if (!dev->power.is_suspended) in __device_resume()
918 if (dev->pm_domain) { in __device_resume()
920 callback = pm_op(&dev->pm_domain->ops, state); in __device_resume()
924 if (dev->type && dev->type->pm) { in __device_resume()
926 callback = pm_op(dev->type->pm, state); in __device_resume()
930 if (dev->class && dev->class->pm) { in __device_resume()
932 callback = pm_op(dev->class->pm, state); in __device_resume()
936 if (dev->bus) { in __device_resume()
937 if (dev->bus->pm) { in __device_resume()
939 callback = pm_op(dev->bus->pm, state); in __device_resume()
940 } else if (dev->bus->resume) { in __device_resume()
942 callback = dev->bus->resume; in __device_resume()
948 if (!callback && dev->driver && dev->driver->pm) { in __device_resume()
950 callback = pm_op(dev->driver->pm, state); in __device_resume()
954 error = dpm_run_callback(callback, dev, state, info); in __device_resume()
955 dev->power.is_suspended = false; in __device_resume()
958 device_unlock(dev); in __device_resume()
962 complete_all(&dev->power.completion); in __device_resume()
969 dpm_save_failed_dev(dev_name(dev)); in __device_resume()
970 pm_dev_err(dev, state, async ? " async" : "", error); in __device_resume()
976 struct device *dev = data; in async_resume() local
978 __device_resume(dev, pm_transition, true); in async_resume()
979 put_device(dev); in async_resume()
982 static void device_resume(struct device *dev) in device_resume() argument
984 if (dpm_async_fn(dev, async_resume)) in device_resume()
987 __device_resume(dev, pm_transition, false); in device_resume()
999 struct device *dev; in dpm_resume() local
1010 dev = to_device(dpm_suspended_list.next); in dpm_resume()
1012 get_device(dev); in dpm_resume()
1016 device_resume(dev); in dpm_resume()
1020 if (!list_empty(&dev->power.entry)) in dpm_resume()
1021 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_resume()
1025 put_device(dev); in dpm_resume()
1043 static void device_complete(struct device *dev, pm_message_t state) in device_complete() argument
1048 if (dev->power.syscore) in device_complete()
1051 device_lock(dev); in device_complete()
1053 if (dev->pm_domain) { in device_complete()
1055 callback = dev->pm_domain->ops.complete; in device_complete()
1056 } else if (dev->type && dev->type->pm) { in device_complete()
1058 callback = dev->type->pm->complete; in device_complete()
1059 } else if (dev->class && dev->class->pm) { in device_complete()
1061 callback = dev->class->pm->complete; in device_complete()
1062 } else if (dev->bus && dev->bus->pm) { in device_complete()
1064 callback = dev->bus->pm->complete; in device_complete()
1067 if (!callback && dev->driver && dev->driver->pm) { in device_complete()
1069 callback = dev->driver->pm->complete; in device_complete()
1073 pm_dev_dbg(dev, state, info); in device_complete()
1074 callback(dev); in device_complete()
1077 device_unlock(dev); in device_complete()
1080 pm_runtime_put(dev); in device_complete()
1100 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_complete() local
1102 get_device(dev); in dpm_complete()
1103 dev->power.is_prepared = false; in dpm_complete()
1104 list_move(&dev->power.entry, &list); in dpm_complete()
1108 trace_device_pm_callback_start(dev, "", state.event); in dpm_complete()
1109 device_complete(dev, state); in dpm_complete()
1110 trace_device_pm_callback_end(dev, 0); in dpm_complete()
1112 put_device(dev); in dpm_complete()
1162 static void dpm_superior_set_must_resume(struct device *dev) in dpm_superior_set_must_resume() argument
1167 if (dev->parent) in dpm_superior_set_must_resume()
1168 dev->parent->power.must_resume = true; in dpm_superior_set_must_resume()
1172 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) in dpm_superior_set_must_resume()
1187 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) in __device_suspend_noirq() argument
1193 TRACE_DEVICE(dev); in __device_suspend_noirq()
1196 dpm_wait_for_subordinate(dev, async); in __device_suspend_noirq()
1201 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_noirq()
1204 if (dev->pm_domain) { in __device_suspend_noirq()
1206 callback = pm_noirq_op(&dev->pm_domain->ops, state); in __device_suspend_noirq()
1207 } else if (dev->type && dev->type->pm) { in __device_suspend_noirq()
1209 callback = pm_noirq_op(dev->type->pm, state); in __device_suspend_noirq()
1210 } else if (dev->class && dev->class->pm) { in __device_suspend_noirq()
1212 callback = pm_noirq_op(dev->class->pm, state); in __device_suspend_noirq()
1213 } else if (dev->bus && dev->bus->pm) { in __device_suspend_noirq()
1215 callback = pm_noirq_op(dev->bus->pm, state); in __device_suspend_noirq()
1220 if (dev_pm_skip_suspend(dev)) in __device_suspend_noirq()
1223 if (dev->driver && dev->driver->pm) { in __device_suspend_noirq()
1225 callback = pm_noirq_op(dev->driver->pm, state); in __device_suspend_noirq()
1229 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_noirq()
1236 dev->power.is_noirq_suspended = true; in __device_suspend_noirq()
1244 if (atomic_read(&dev->power.usage_count) > 1 || in __device_suspend_noirq()
1245 !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && in __device_suspend_noirq()
1246 dev->power.may_skip_resume)) in __device_suspend_noirq()
1247 dev->power.must_resume = true; in __device_suspend_noirq()
1249 if (dev->power.must_resume) in __device_suspend_noirq()
1250 dpm_superior_set_must_resume(dev); in __device_suspend_noirq()
1253 complete_all(&dev->power.completion); in __device_suspend_noirq()
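
Lines 1244-1250 are the heart of the skip-resume handshake: the device may be left suspended across the following resume only if nothing else holds a runtime-PM reference and both the driver (DPM_FLAG_MAY_SKIP_RESUME) and the core (power.may_skip_resume) agreed; otherwise must_resume is set and propagated to the parent and suppliers. The predicate isolated as illustrative code:

#include <stdbool.h>
#include <stdio.h>

static bool compute_must_resume(int usage_count, bool driver_may_skip,
                                bool core_may_skip)
{
    return usage_count > 1 || !(driver_may_skip && core_may_skip);
}

int main(void)
{
    /* Only the fully-agreed case is allowed to skip resume: */
    printf("%d\n", compute_must_resume(1, true, true));   /* 0: may skip */
    printf("%d\n", compute_must_resume(2, true, true));   /* 1: must resume */
    printf("%d\n", compute_must_resume(1, false, true));  /* 1: must resume */
    return 0;
}
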
1260 struct device *dev = data; in async_suspend_noirq() local
1263 error = __device_suspend_noirq(dev, pm_transition, true); in async_suspend_noirq()
1265 dpm_save_failed_dev(dev_name(dev)); in async_suspend_noirq()
1266 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_noirq()
1269 put_device(dev); in async_suspend_noirq()
1272 static int device_suspend_noirq(struct device *dev) in device_suspend_noirq() argument
1274 if (dpm_async_fn(dev, async_suspend_noirq)) in device_suspend_noirq()
1277 return __device_suspend_noirq(dev, pm_transition, false); in device_suspend_noirq()
1291 struct device *dev = to_device(dpm_late_early_list.prev); in dpm_noirq_suspend_devices() local
1293 get_device(dev); in dpm_noirq_suspend_devices()
1296 error = device_suspend_noirq(dev); in dpm_noirq_suspend_devices()
1301 pm_dev_err(dev, state, " noirq", error); in dpm_noirq_suspend_devices()
1302 dpm_save_failed_dev(dev_name(dev)); in dpm_noirq_suspend_devices()
1303 } else if (!list_empty(&dev->power.entry)) { in dpm_noirq_suspend_devices()
1304 list_move(&dev->power.entry, &dpm_noirq_list); in dpm_noirq_suspend_devices()
1309 put_device(dev); in dpm_noirq_suspend_devices()
1351 static void dpm_propagate_wakeup_to_parent(struct device *dev) in dpm_propagate_wakeup_to_parent() argument
1353 struct device *parent = dev->parent; in dpm_propagate_wakeup_to_parent()
1360 if (device_wakeup_path(dev) && !parent->power.ignore_children) in dpm_propagate_wakeup_to_parent()
1374 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) in __device_suspend_late() argument
1380 TRACE_DEVICE(dev); in __device_suspend_late()
1383 __pm_runtime_disable(dev, false); in __device_suspend_late()
1385 dpm_wait_for_subordinate(dev, async); in __device_suspend_late()
1395 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_late()
1398 if (dev->pm_domain) { in __device_suspend_late()
1400 callback = pm_late_early_op(&dev->pm_domain->ops, state); in __device_suspend_late()
1401 } else if (dev->type && dev->type->pm) { in __device_suspend_late()
1403 callback = pm_late_early_op(dev->type->pm, state); in __device_suspend_late()
1404 } else if (dev->class && dev->class->pm) { in __device_suspend_late()
1406 callback = pm_late_early_op(dev->class->pm, state); in __device_suspend_late()
1407 } else if (dev->bus && dev->bus->pm) { in __device_suspend_late()
1409 callback = pm_late_early_op(dev->bus->pm, state); in __device_suspend_late()
1414 if (dev_pm_skip_suspend(dev)) in __device_suspend_late()
1417 if (dev->driver && dev->driver->pm) { in __device_suspend_late()
1419 callback = pm_late_early_op(dev->driver->pm, state); in __device_suspend_late()
1423 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_late()
1428 dpm_propagate_wakeup_to_parent(dev); in __device_suspend_late()
1431 dev->power.is_late_suspended = true; in __device_suspend_late()
1435 complete_all(&dev->power.completion); in __device_suspend_late()
1441 struct device *dev = data; in async_suspend_late() local
1444 error = __device_suspend_late(dev, pm_transition, true); in async_suspend_late()
1446 dpm_save_failed_dev(dev_name(dev)); in async_suspend_late()
1447 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_late()
1449 put_device(dev); in async_suspend_late()
1452 static int device_suspend_late(struct device *dev) in device_suspend_late() argument
1454 if (dpm_async_fn(dev, async_suspend_late)) in device_suspend_late()
1457 return __device_suspend_late(dev, pm_transition, false); in device_suspend_late()
1476 struct device *dev = to_device(dpm_suspended_list.prev); in dpm_suspend_late() local
1478 get_device(dev); in dpm_suspend_late()
1482 error = device_suspend_late(dev); in dpm_suspend_late()
1486 if (!list_empty(&dev->power.entry)) in dpm_suspend_late()
1487 list_move(&dev->power.entry, &dpm_late_early_list); in dpm_suspend_late()
1490 pm_dev_err(dev, state, " late", error); in dpm_suspend_late()
1491 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend_late()
1496 put_device(dev); in dpm_suspend_late()
1547 static int legacy_suspend(struct device *dev, pm_message_t state, in legacy_suspend() argument
1548 int (*cb)(struct device *dev, pm_message_t state), in legacy_suspend() argument
1554 calltime = initcall_debug_start(dev, cb); in legacy_suspend()
1556 trace_device_pm_callback_start(dev, info, state.event); in legacy_suspend()
1557 error = cb(dev, state); in legacy_suspend()
1558 trace_device_pm_callback_end(dev, error); in legacy_suspend()
1559 suspend_report_result(dev, cb, error); in legacy_suspend()
1561 initcall_debug_report(dev, calltime, cb, error); in legacy_suspend()
1566 static void dpm_clear_superiors_direct_complete(struct device *dev) in dpm_clear_superiors_direct_complete() argument
1571 if (dev->parent) { in dpm_clear_superiors_direct_complete()
1572 spin_lock_irq(&dev->parent->power.lock); in dpm_clear_superiors_direct_complete()
1573 dev->parent->power.direct_complete = false; in dpm_clear_superiors_direct_complete()
1574 spin_unlock_irq(&dev->parent->power.lock); in dpm_clear_superiors_direct_complete()
1579 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { in dpm_clear_superiors_direct_complete()
1594 static int __device_suspend(struct device *dev, pm_message_t state, bool async) in __device_suspend() argument
1601 TRACE_DEVICE(dev); in __device_suspend()
1604 dpm_wait_for_subordinate(dev, async); in __device_suspend()
1607 dev->power.direct_complete = false; in __device_suspend()
1622 pm_runtime_barrier(dev); in __device_suspend()
1625 dev->power.direct_complete = false; in __device_suspend()
1630 if (dev->power.syscore) in __device_suspend()
1634 if (device_may_wakeup(dev) || device_wakeup_path(dev)) in __device_suspend()
1635 dev->power.direct_complete = false; in __device_suspend()
1637 if (dev->power.direct_complete) { in __device_suspend()
1638 if (pm_runtime_status_suspended(dev)) { in __device_suspend()
1639 pm_runtime_disable(dev); in __device_suspend()
1640 if (pm_runtime_status_suspended(dev)) { in __device_suspend()
1641 pm_dev_dbg(dev, state, "direct-complete "); in __device_suspend()
1645 pm_runtime_enable(dev); in __device_suspend()
1647 dev->power.direct_complete = false; in __device_suspend()
1650 dev->power.may_skip_resume = true; in __device_suspend()
1651 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME); in __device_suspend()
1653 dpm_watchdog_set(&wd, dev); in __device_suspend()
1654 device_lock(dev); in __device_suspend()
1656 if (dev->pm_domain) { in __device_suspend()
1658 callback = pm_op(&dev->pm_domain->ops, state); in __device_suspend()
1662 if (dev->type && dev->type->pm) { in __device_suspend()
1664 callback = pm_op(dev->type->pm, state); in __device_suspend()
1668 if (dev->class && dev->class->pm) { in __device_suspend()
1670 callback = pm_op(dev->class->pm, state); in __device_suspend()
1674 if (dev->bus) { in __device_suspend()
1675 if (dev->bus->pm) { in __device_suspend()
1677 callback = pm_op(dev->bus->pm, state); in __device_suspend()
1678 } else if (dev->bus->suspend) { in __device_suspend()
1679 pm_dev_dbg(dev, state, "legacy bus "); in __device_suspend()
1680 error = legacy_suspend(dev, state, dev->bus->suspend, in __device_suspend()
1687 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend()
1689 callback = pm_op(dev->driver->pm, state); in __device_suspend()
1692 error = dpm_run_callback(callback, dev, state, info); in __device_suspend()
1696 dev->power.is_suspended = true; in __device_suspend()
1697 if (device_may_wakeup(dev)) in __device_suspend()
1698 dev->power.wakeup_path = true; in __device_suspend()
1700 dpm_propagate_wakeup_to_parent(dev); in __device_suspend()
1701 dpm_clear_superiors_direct_complete(dev); in __device_suspend()
1704 device_unlock(dev); in __device_suspend()
1711 complete_all(&dev->power.completion); in __device_suspend()
1718 struct device *dev = data; in async_suspend() local
1721 error = __device_suspend(dev, pm_transition, true); in async_suspend()
1723 dpm_save_failed_dev(dev_name(dev)); in async_suspend()
1724 pm_dev_err(dev, pm_transition, " async", error); in async_suspend()
1727 put_device(dev); in async_suspend()
1730 static int device_suspend(struct device *dev) in device_suspend() argument
1732 if (dpm_async_fn(dev, async_suspend)) in device_suspend()
1735 return __device_suspend(dev, pm_transition, false); in device_suspend()
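
The direct_complete gate at lines 1637-1648 checks the runtime-PM status again after pm_runtime_disable(), because a runtime resume can race with the first check; only if the device is still runtime-suspended after the disable may its suspend callbacks be skipped entirely. A sequential model of that gate (the race itself cannot be reproduced single-threaded, so the recheck is only annotated):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool rpm_suspended; bool rpm_enabled; bool direct_complete; };

static bool try_direct_complete(struct dev *d)
{
    if (d->rpm_suspended) {
        d->rpm_enabled = false;        /* pm_runtime_disable() */
        if (d->rpm_suspended)          /* still suspended after disable? */
            return true;               /* skip the callbacks entirely */
        d->rpm_enabled = true;         /* raced with a resume: back off */
    }
    d->direct_complete = false;        /* fall through to a full suspend */
    return false;
}

int main(void)
{
    struct dev d = { .rpm_suspended = true, .rpm_enabled = true,
                     .direct_complete = true };
    printf("direct path taken: %d\n", try_direct_complete(&d));
    return 0;
}
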
1757 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_suspend() local
1759 get_device(dev); in dpm_suspend()
1763 error = device_suspend(dev); in dpm_suspend()
1768 pm_dev_err(dev, state, "", error); in dpm_suspend()
1769 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend()
1770 } else if (!list_empty(&dev->power.entry)) { in dpm_suspend()
1771 list_move(&dev->power.entry, &dpm_suspended_list); in dpm_suspend()
1776 put_device(dev); in dpm_suspend()
1804 static int device_prepare(struct device *dev, pm_message_t state) in device_prepare() argument
1815 pm_runtime_get_noresume(dev); in device_prepare()
1817 if (dev->power.syscore) in device_prepare()
1820 device_lock(dev); in device_prepare()
1822 dev->power.wakeup_path = false; in device_prepare()
1824 if (dev->power.no_pm_callbacks) in device_prepare()
1827 if (dev->pm_domain) in device_prepare()
1828 callback = dev->pm_domain->ops.prepare; in device_prepare()
1829 else if (dev->type && dev->type->pm) in device_prepare()
1830 callback = dev->type->pm->prepare; in device_prepare()
1831 else if (dev->class && dev->class->pm) in device_prepare()
1832 callback = dev->class->pm->prepare; in device_prepare()
1833 else if (dev->bus && dev->bus->pm) in device_prepare()
1834 callback = dev->bus->pm->prepare; in device_prepare()
1836 if (!callback && dev->driver && dev->driver->pm) in device_prepare()
1837 callback = dev->driver->pm->prepare; in device_prepare()
1840 ret = callback(dev); in device_prepare()
1843 device_unlock(dev); in device_prepare()
1846 suspend_report_result(dev, callback, ret); in device_prepare()
1847 pm_runtime_put(dev); in device_prepare()
1857 spin_lock_irq(&dev->power.lock); in device_prepare()
1858 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && in device_prepare()
1859 (ret > 0 || dev->power.no_pm_callbacks) && in device_prepare()
1860 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); in device_prepare()
1861 spin_unlock_irq(&dev->power.lock); in device_prepare()
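
Lines 1858-1860 compute direct_complete in one expression: the transition must be a system suspend (PM_EVENT_SUSPEND, not hibernation), ->prepare() must have returned a positive value or the device must have no PM callbacks at all, and the driver must not have set DPM_FLAG_NO_DIRECT_COMPLETE. The same decision as a standalone predicate, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool want_direct_complete(bool event_is_suspend, int prepare_ret,
                                 bool no_pm_callbacks, bool driver_veto)
{
    return event_is_suspend &&
           (prepare_ret > 0 || no_pm_callbacks) &&
           !driver_veto;
}

int main(void)
{
    printf("%d\n", want_direct_complete(true, 1, false, false));  /* 1 */
    printf("%d\n", want_direct_complete(true, 0, false, false));  /* 0 */
    printf("%d\n", want_direct_complete(false, 1, false, false)); /* 0 */
    return 0;
}
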
1894 struct device *dev = to_device(dpm_list.next); in dpm_prepare() local
1896 get_device(dev); in dpm_prepare()
1900 trace_device_pm_callback_start(dev, "", state.event); in dpm_prepare()
1901 error = device_prepare(dev, state); in dpm_prepare()
1902 trace_device_pm_callback_end(dev, error); in dpm_prepare()
1907 dev->power.is_prepared = true; in dpm_prepare()
1908 if (!list_empty(&dev->power.entry)) in dpm_prepare()
1909 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_prepare()
1913 dev_info(dev, "not prepared for power transition: code %d\n", in dpm_prepare()
1919 put_device(dev); in dpm_prepare()
1951 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret) in __suspend_report_result() argument
1954 dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret); in __suspend_report_result()
1963 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) in device_pm_wait_for_dev() argument
1965 dpm_wait(dev, subordinate->power.async_suspend); in device_pm_wait_for_dev()
1980 struct device *dev; in dpm_for_each_dev() local
1986 list_for_each_entry(dev, &dpm_list, power.entry) in dpm_for_each_dev()
1987 fn(dev, data); in dpm_for_each_dev()
2007 void device_pm_check_callbacks(struct device *dev) in device_pm_check_callbacks() argument
2011 spin_lock_irqsave(&dev->power.lock, flags); in device_pm_check_callbacks()
2012 dev->power.no_pm_callbacks = in device_pm_check_callbacks()
2013 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && in device_pm_check_callbacks()
2014 !dev->bus->suspend && !dev->bus->resume)) && in device_pm_check_callbacks()
2015 (!dev->class || pm_ops_is_empty(dev->class->pm)) && in device_pm_check_callbacks()
2016 (!dev->type || pm_ops_is_empty(dev->type->pm)) && in device_pm_check_callbacks()
2017 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && in device_pm_check_callbacks()
2018 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && in device_pm_check_callbacks()
2019 !dev->driver->suspend && !dev->driver->resume)); in device_pm_check_callbacks()
2020 spin_unlock_irqrestore(&dev->power.lock, flags); in device_pm_check_callbacks()
2023 bool dev_pm_skip_suspend(struct device *dev) in dev_pm_skip_suspend() argument
2025 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) && in dev_pm_skip_suspend()
2026 pm_runtime_status_suspended(dev); in dev_pm_skip_suspend()
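
dev_pm_skip_suspend() and dev_pm_skip_resume() are the public faces of the machinery above: suspend is skipped when the driver set DPM_FLAG_SMART_SUSPEND and the device is already runtime-suspended, and resume is then skipped unless must_resume was recorded along the way. The pairing reduced to booleans; the real dev_pm_skip_resume() also distinguishes hibernation thaw/restore events, which the listing elides and this sketch collapses into one flag:

#include <stdbool.h>
#include <stdio.h>

static bool skip_suspend(bool smart_suspend, bool runtime_suspended)
{
    return smart_suspend && runtime_suspended;
}

static bool skip_resume(bool special_transition, bool smart_suspend,
                        bool runtime_suspended, bool must_resume)
{
    if (special_transition)            /* stand-in for the elided
                                          transition-event check */
        return skip_suspend(smart_suspend, runtime_suspended);
    return !must_resume;               /* normal resume: honor must_resume */
}

int main(void)
{
    printf("%d\n", skip_suspend(true, true));               /* 1 */
    printf("%d\n", skip_resume(false, true, true, false));  /* 1 */
    printf("%d\n", skip_resume(false, true, true, true));   /* 0 */
    return 0;
}
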