Lines Matching refs:dev

52 enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)  in __dev_pm_qos_flags()  argument
54 struct dev_pm_qos *qos = dev->power.qos; in __dev_pm_qos_flags()
58 lockdep_assert_held(&dev->power.lock); in __dev_pm_qos_flags()
79 enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) in dev_pm_qos_flags() argument
84 spin_lock_irqsave(&dev->power.lock, irqflags); in dev_pm_qos_flags()
85 ret = __dev_pm_qos_flags(dev, mask); in dev_pm_qos_flags()
86 spin_unlock_irqrestore(&dev->power.lock, irqflags); in dev_pm_qos_flags()
98 s32 __dev_pm_qos_resume_latency(struct device *dev) in __dev_pm_qos_resume_latency() argument
100 lockdep_assert_held(&dev->power.lock); in __dev_pm_qos_resume_latency()
102 return dev_pm_qos_raw_resume_latency(dev); in __dev_pm_qos_resume_latency()
110 s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type) in dev_pm_qos_read_value() argument
112 struct dev_pm_qos *qos = dev->power.qos; in dev_pm_qos_read_value()
116 spin_lock_irqsave(&dev->power.lock, flags); in dev_pm_qos_read_value()
136 spin_unlock_irqrestore(&dev->power.lock, flags); in dev_pm_qos_read_value()
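
The entries above cover the read side of the device PM QoS API: __dev_pm_qos_flags()/dev_pm_qos_flags() test flag requests against a mask under dev->power.lock, and dev_pm_qos_read_value() returns the aggregated target for a given request type. A minimal sketch of how a caller might consult these values before powering a device off; the helper name and the latency comparison are illustrative assumptions, not taken from this file.

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical helper: may this device be powered off right now? */
static bool example_may_power_off(struct device *dev, s32 off_exit_latency_us)
{
        /* Any request with PM_QOS_FLAG_NO_POWER_OFF forbids powering off. */
        if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) != PM_QOS_FLAGS_NONE)
                return false;

        /* Power off only if the device can resume within the aggregated limit. */
        return off_exit_latency_us <=
                dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
}
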
153 struct dev_pm_qos *qos = req->dev->power.qos; in apply_constraint()
169 req->dev->power.set_latency_tolerance(req->dev, value); in apply_constraint()
194 static int dev_pm_qos_constraints_allocate(struct device *dev) in dev_pm_qos_constraints_allocate() argument
230 spin_lock_irq(&dev->power.lock); in dev_pm_qos_constraints_allocate()
231 dev->power.qos = qos; in dev_pm_qos_constraints_allocate()
232 spin_unlock_irq(&dev->power.lock); in dev_pm_qos_constraints_allocate()
237 static void __dev_pm_qos_hide_latency_limit(struct device *dev);
238 static void __dev_pm_qos_hide_flags(struct device *dev);
246 void dev_pm_qos_constraints_destroy(struct device *dev) in dev_pm_qos_constraints_destroy() argument
259 pm_qos_sysfs_remove_resume_latency(dev); in dev_pm_qos_constraints_destroy()
260 pm_qos_sysfs_remove_flags(dev); in dev_pm_qos_constraints_destroy()
264 __dev_pm_qos_hide_latency_limit(dev); in dev_pm_qos_constraints_destroy()
265 __dev_pm_qos_hide_flags(dev); in dev_pm_qos_constraints_destroy()
267 qos = dev->power.qos; in dev_pm_qos_constraints_destroy()
308 spin_lock_irq(&dev->power.lock); in dev_pm_qos_constraints_destroy()
309 dev->power.qos = ERR_PTR(-ENODEV); in dev_pm_qos_constraints_destroy()
310 spin_unlock_irq(&dev->power.lock); in dev_pm_qos_constraints_destroy()
321 static bool dev_pm_qos_invalid_req_type(struct device *dev, in dev_pm_qos_invalid_req_type() argument
325 !dev->power.set_latency_tolerance; in dev_pm_qos_invalid_req_type()
328 static int __dev_pm_qos_add_request(struct device *dev, in __dev_pm_qos_add_request() argument
334 if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type)) in __dev_pm_qos_add_request()
341 if (IS_ERR(dev->power.qos)) in __dev_pm_qos_add_request()
343 else if (!dev->power.qos) in __dev_pm_qos_add_request()
344 ret = dev_pm_qos_constraints_allocate(dev); in __dev_pm_qos_add_request()
346 trace_dev_pm_qos_add_request(dev_name(dev), type, value); in __dev_pm_qos_add_request()
350 req->dev = dev; in __dev_pm_qos_add_request()
353 ret = freq_qos_add_request(&dev->power.qos->freq, in __dev_pm_qos_add_request()
357 ret = freq_qos_add_request(&dev->power.qos->freq, in __dev_pm_qos_add_request()
388 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, in dev_pm_qos_add_request() argument
394 ret = __dev_pm_qos_add_request(dev, req, type, value); in dev_pm_qos_add_request()
418 if (IS_ERR_OR_NULL(req->dev->power.qos)) in __dev_pm_qos_update_request()
437 trace_dev_pm_qos_update_request(dev_name(req->dev), req->type, in __dev_pm_qos_update_request()
485 if (IS_ERR_OR_NULL(req->dev->power.qos)) in __dev_pm_qos_remove_request()
488 trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type, in __dev_pm_qos_remove_request()
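
dev_pm_qos_add_request() (and its locked helper __dev_pm_qos_add_request()) attaches a caller-owned struct dev_pm_qos_request to the device, allocating dev->power.qos on first use; __dev_pm_qos_update_request() and __dev_pm_qos_remove_request() then adjust or drop it. A hedged sketch of typical driver usage; the example_* names are invented for illustration.

#include <linux/device.h>
#include <linux/pm_qos.h>

struct example_priv {
        struct dev_pm_qos_request req;  /* storage must outlive the request */
};

static int example_constrain(struct device *dev, struct example_priv *p)
{
        /* Ask that resuming this device take no more than 100 us. */
        return dev_pm_qos_add_request(dev, &p->req,
                                      DEV_PM_QOS_RESUME_LATENCY, 100);
}

static void example_relax(struct example_priv *p)
{
        dev_pm_qos_update_request(&p->req, 500);   /* loosen to 500 us */
        dev_pm_qos_remove_request(&p->req);        /* drop the constraint */
}
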
535 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier, in dev_pm_qos_add_notifier() argument
542 if (IS_ERR(dev->power.qos)) in dev_pm_qos_add_notifier()
544 else if (!dev->power.qos) in dev_pm_qos_add_notifier()
545 ret = dev_pm_qos_constraints_allocate(dev); in dev_pm_qos_add_notifier()
552 ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, in dev_pm_qos_add_notifier()
556 ret = freq_qos_add_notifier(&dev->power.qos->freq, in dev_pm_qos_add_notifier()
560 ret = freq_qos_add_notifier(&dev->power.qos->freq, in dev_pm_qos_add_notifier()
585 int dev_pm_qos_remove_notifier(struct device *dev, in dev_pm_qos_remove_notifier() argument
594 if (IS_ERR_OR_NULL(dev->power.qos)) in dev_pm_qos_remove_notifier()
599 ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, in dev_pm_qos_remove_notifier()
603 ret = freq_qos_remove_notifier(&dev->power.qos->freq, in dev_pm_qos_remove_notifier()
607 ret = freq_qos_remove_notifier(&dev->power.qos->freq, in dev_pm_qos_remove_notifier()
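
dev_pm_qos_add_notifier()/dev_pm_qos_remove_notifier() register a notifier_block against the constraint list selected by the request type (resume latency uses the blocking notifier chain, the frequency types go through freq_qos). A sketch, assuming a made-up example_qos_notify() callback, of listening for resume-latency changes.

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/printk.h>

/* Called with the new aggregated resume-latency target in 'value'. */
static int example_qos_notify(struct notifier_block *nb,
                              unsigned long value, void *unused)
{
        pr_debug("resume latency target is now %lu us\n", value);
        return NOTIFY_OK;
}

static struct notifier_block example_qos_nb = {
        .notifier_call = example_qos_notify,
};

static int example_watch(struct device *dev)
{
        return dev_pm_qos_add_notifier(dev, &example_qos_nb,
                                       DEV_PM_QOS_RESUME_LATENCY);
}

static void example_unwatch(struct device *dev)
{
        dev_pm_qos_remove_notifier(dev, &example_qos_nb,
                                   DEV_PM_QOS_RESUME_LATENCY);
}
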
628 int dev_pm_qos_add_ancestor_request(struct device *dev, in dev_pm_qos_add_ancestor_request() argument
632 struct device *ancestor = dev->parent; in dev_pm_qos_add_ancestor_request()
653 req->dev = NULL; in dev_pm_qos_add_ancestor_request()
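
dev_pm_qos_add_ancestor_request() places the request not on the device itself but on an ancestor found by walking up from dev->parent, and clears req->dev if that fails. A brief hedged sketch; the request storage and the 50 us value are illustrative only.

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_ancestor_req;

static int example_constrain_ancestor(struct device *dev)
{
        /* Put a 50 us resume-latency request on a suitable ancestor. */
        return dev_pm_qos_add_ancestor_request(dev, &example_ancestor_req,
                                               DEV_PM_QOS_RESUME_LATENCY, 50);
}
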
659 static void __dev_pm_qos_drop_user_request(struct device *dev, in __dev_pm_qos_drop_user_request() argument
666 req = dev->power.qos->resume_latency_req; in __dev_pm_qos_drop_user_request()
667 dev->power.qos->resume_latency_req = NULL; in __dev_pm_qos_drop_user_request()
670 req = dev->power.qos->latency_tolerance_req; in __dev_pm_qos_drop_user_request()
671 dev->power.qos->latency_tolerance_req = NULL; in __dev_pm_qos_drop_user_request()
674 req = dev->power.qos->flags_req; in __dev_pm_qos_drop_user_request()
675 dev->power.qos->flags_req = NULL; in __dev_pm_qos_drop_user_request()
685 static void dev_pm_qos_drop_user_request(struct device *dev, in dev_pm_qos_drop_user_request() argument
689 __dev_pm_qos_drop_user_request(dev, type); in dev_pm_qos_drop_user_request()
698 int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) in dev_pm_qos_expose_latency_limit() argument
703 if (!device_is_registered(dev) || value < 0) in dev_pm_qos_expose_latency_limit()
710 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); in dev_pm_qos_expose_latency_limit()
720 if (IS_ERR_OR_NULL(dev->power.qos)) in dev_pm_qos_expose_latency_limit()
722 else if (dev->power.qos->resume_latency_req) in dev_pm_qos_expose_latency_limit()
731 dev->power.qos->resume_latency_req = req; in dev_pm_qos_expose_latency_limit()
735 ret = pm_qos_sysfs_add_resume_latency(dev); in dev_pm_qos_expose_latency_limit()
737 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); in dev_pm_qos_expose_latency_limit()
745 static void __dev_pm_qos_hide_latency_limit(struct device *dev) in __dev_pm_qos_hide_latency_limit() argument
747 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) in __dev_pm_qos_hide_latency_limit()
748 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); in __dev_pm_qos_hide_latency_limit()
755 void dev_pm_qos_hide_latency_limit(struct device *dev) in dev_pm_qos_hide_latency_limit() argument
759 pm_qos_sysfs_remove_resume_latency(dev); in dev_pm_qos_hide_latency_limit()
762 __dev_pm_qos_hide_latency_limit(dev); in dev_pm_qos_hide_latency_limit()
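
dev_pm_qos_expose_latency_limit() adds a kernel-owned resume-latency request and publishes it to user space through the power/pm_qos_resume_latency_us attribute; dev_pm_qos_hide_latency_limit() tears both down again. A short sketch of a driver enabling the knob at probe time; the 1000 us default is an arbitrary example value.

#include <linux/device.h>
#include <linux/pm_qos.h>

static int example_probe(struct device *dev)
{
        /* Let user space tune the resume-latency limit, defaulting to 1 ms. */
        return dev_pm_qos_expose_latency_limit(dev, 1000);
}

static void example_remove(struct device *dev)
{
        dev_pm_qos_hide_latency_limit(dev);
}
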
774 int dev_pm_qos_expose_flags(struct device *dev, s32 val) in dev_pm_qos_expose_flags() argument
779 if (!device_is_registered(dev)) in dev_pm_qos_expose_flags()
786 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); in dev_pm_qos_expose_flags()
792 pm_runtime_get_sync(dev); in dev_pm_qos_expose_flags()
797 if (IS_ERR_OR_NULL(dev->power.qos)) in dev_pm_qos_expose_flags()
799 else if (dev->power.qos->flags_req) in dev_pm_qos_expose_flags()
808 dev->power.qos->flags_req = req; in dev_pm_qos_expose_flags()
812 ret = pm_qos_sysfs_add_flags(dev); in dev_pm_qos_expose_flags()
814 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); in dev_pm_qos_expose_flags()
818 pm_runtime_put(dev); in dev_pm_qos_expose_flags()
823 static void __dev_pm_qos_hide_flags(struct device *dev) in __dev_pm_qos_hide_flags() argument
825 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) in __dev_pm_qos_hide_flags()
826 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); in __dev_pm_qos_hide_flags()
833 void dev_pm_qos_hide_flags(struct device *dev) in dev_pm_qos_hide_flags() argument
835 pm_runtime_get_sync(dev); in dev_pm_qos_hide_flags()
838 pm_qos_sysfs_remove_flags(dev); in dev_pm_qos_hide_flags()
841 __dev_pm_qos_hide_flags(dev); in dev_pm_qos_hide_flags()
845 pm_runtime_put(dev); in dev_pm_qos_hide_flags()
855 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) in dev_pm_qos_update_flags() argument
860 pm_runtime_get_sync(dev); in dev_pm_qos_update_flags()
863 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) { in dev_pm_qos_update_flags()
868 value = dev_pm_qos_requested_flags(dev); in dev_pm_qos_update_flags()
874 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); in dev_pm_qos_update_flags()
878 pm_runtime_put(dev); in dev_pm_qos_update_flags()
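
dev_pm_qos_expose_flags() creates the user-visible flags request and its sysfs attribute, while dev_pm_qos_update_flags() sets or clears bits in that request from kernel code, bracketed by pm_runtime_get_sync()/pm_runtime_put() as the lines above show. A hedged usage sketch; the example_* wrappers are invented for illustration.

#include <linux/device.h>
#include <linux/pm_qos.h>

static int example_setup_flags(struct device *dev)
{
        /* Expose the flags request with no flags set initially. */
        return dev_pm_qos_expose_flags(dev, 0);
}

static void example_forbid_power_off(struct device *dev, bool forbid)
{
        /* Set or clear PM_QOS_FLAG_NO_POWER_OFF in the exposed request. */
        dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, forbid);
}
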
886 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) in dev_pm_qos_get_user_latency_tolerance() argument
891 ret = IS_ERR_OR_NULL(dev->power.qos) in dev_pm_qos_get_user_latency_tolerance()
892 || !dev->power.qos->latency_tolerance_req ? in dev_pm_qos_get_user_latency_tolerance()
894 dev->power.qos->latency_tolerance_req->data.pnode.prio; in dev_pm_qos_get_user_latency_tolerance()
904 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) in dev_pm_qos_update_user_latency_tolerance() argument
910 if (IS_ERR_OR_NULL(dev->power.qos) in dev_pm_qos_update_user_latency_tolerance()
911 || !dev->power.qos->latency_tolerance_req) { in dev_pm_qos_update_user_latency_tolerance()
926 ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); in dev_pm_qos_update_user_latency_tolerance()
931 dev->power.qos->latency_tolerance_req = req; in dev_pm_qos_update_user_latency_tolerance()
934 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); in dev_pm_qos_update_user_latency_tolerance()
937 ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); in dev_pm_qos_update_user_latency_tolerance()
951 int dev_pm_qos_expose_latency_tolerance(struct device *dev) in dev_pm_qos_expose_latency_tolerance() argument
955 if (!dev->power.set_latency_tolerance) in dev_pm_qos_expose_latency_tolerance()
959 ret = pm_qos_sysfs_add_latency_tolerance(dev); in dev_pm_qos_expose_latency_tolerance()
970 void dev_pm_qos_hide_latency_tolerance(struct device *dev) in dev_pm_qos_hide_latency_tolerance() argument
973 pm_qos_sysfs_remove_latency_tolerance(dev); in dev_pm_qos_hide_latency_tolerance()
977 pm_runtime_get_sync(dev); in dev_pm_qos_hide_latency_tolerance()
978 dev_pm_qos_update_user_latency_tolerance(dev, in dev_pm_qos_hide_latency_tolerance()
980 pm_runtime_put(dev); in dev_pm_qos_hide_latency_tolerance()
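
dev_pm_qos_expose_latency_tolerance() only succeeds when the device supplies a dev->power.set_latency_tolerance() callback, the same hook apply_constraint() invokes whenever the aggregated tolerance changes; dev_pm_qos_hide_latency_tolerance() resets the user request and removes the sysfs attribute. A closing sketch of a driver wiring this up; example_set_lat_tol() is a made-up callback that would program hardware-specific latency-tolerance settings.

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Invoked by the PM QoS core with the new latency tolerance target. */
static void example_set_lat_tol(struct device *dev, s32 val)
{
        /* Program device-specific latency tolerance from 'val' here. */
        dev_dbg(dev, "latency tolerance is now %d\n", val);
}

static int example_enable_lat_tol(struct device *dev)
{
        dev->power.set_latency_tolerance = example_set_lat_tol;
        /* Creates the power/pm_qos_latency_tolerance_us attribute. */
        return dev_pm_qos_expose_latency_tolerance(dev);
}
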