xref: /openbmc/linux/drivers/base/power/qos.c (revision 41ba8bd0)
191ff4cb8SJean Pihet /*
291ff4cb8SJean Pihet  * Devices PM QoS constraints management
391ff4cb8SJean Pihet  *
491ff4cb8SJean Pihet  * Copyright (C) 2011 Texas Instruments, Inc.
591ff4cb8SJean Pihet  *
691ff4cb8SJean Pihet  * This program is free software; you can redistribute it and/or modify
791ff4cb8SJean Pihet  * it under the terms of the GNU General Public License version 2 as
891ff4cb8SJean Pihet  * published by the Free Software Foundation.
991ff4cb8SJean Pihet  *
1091ff4cb8SJean Pihet  *
1191ff4cb8SJean Pihet  * This module exposes the interface to kernel space for specifying
1291ff4cb8SJean Pihet  * per-device PM QoS dependencies. It provides infrastructure for registration
1391ff4cb8SJean Pihet  * of:
1491ff4cb8SJean Pihet  *
1591ff4cb8SJean Pihet  * Dependents on a QoS value: register requests
1691ff4cb8SJean Pihet  * Watchers of a QoS value: get notified when the target QoS value changes
1791ff4cb8SJean Pihet  *
1891ff4cb8SJean Pihet  * This QoS design is best effort based. Dependents register their QoS needs.
1991ff4cb8SJean Pihet  * Watchers register to keep track of the current QoS needs of the system.
20d08d1b27SViresh Kumar  * Watchers can register a per-device notification callback using the
21d08d1b27SViresh Kumar  * dev_pm_qos_*_notifier API. The notification chain data is stored in the
22d08d1b27SViresh Kumar  * per-device constraint data struct.
2391ff4cb8SJean Pihet  *
2491ff4cb8SJean Pihet  * Note about the per-device constraint data struct allocation:
2591ff4cb8SJean Pihet  * . The per-device constraints data struct ptr is stored into the device
2691ff4cb8SJean Pihet  *    dev_pm_info.
2791ff4cb8SJean Pihet  * . To minimize the data usage by the per-device constraints, the data struct
2891ff4cb8SJean Pihet  *   is only allocated at the first call to dev_pm_qos_add_request.
2991ff4cb8SJean Pihet  * . The data is later free'd when the device is removed from the system.
3091ff4cb8SJean Pihet  * . A global mutex protects the constraints users from the data being
3191ff4cb8SJean Pihet  *   allocated and free'd.
3291ff4cb8SJean Pihet  */
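/*
 * Usage sketch of the two roles described above ("foo" names are hypothetical
 * and "dev" is assumed to be a valid, bound struct device):
 *
 *    // Dependent: request a resume latency limit (in microseconds).
 *    static struct dev_pm_qos_request foo_req;
 *    ret = dev_pm_qos_add_request(dev, &foo_req,
 *                                 DEV_PM_QOS_RESUME_LATENCY, 100);
 *
 *    // Watcher: get called back whenever the aggregated target changes
 *    // (foo_nb is a struct notifier_block, set up as in the
 *    // dev_pm_qos_add_notifier() example further down).
 *    ret = dev_pm_qos_add_notifier(dev, &foo_nb);
 */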
3391ff4cb8SJean Pihet 
3491ff4cb8SJean Pihet #include <linux/pm_qos.h>
3591ff4cb8SJean Pihet #include <linux/spinlock.h>
3691ff4cb8SJean Pihet #include <linux/slab.h>
3791ff4cb8SJean Pihet #include <linux/device.h>
3891ff4cb8SJean Pihet #include <linux/mutex.h>
391b6bc32fSPaul Gortmaker #include <linux/export.h>
40e39473d0SRafael J. Wysocki #include <linux/pm_runtime.h>
4137530f2bSRafael J. Wysocki #include <linux/err.h>
4296d9d0b5SSahara #include <trace/events/power.h>
4391ff4cb8SJean Pihet 
4485dc0b8aSRafael J. Wysocki #include "power.h"
4591ff4cb8SJean Pihet 
4691ff4cb8SJean Pihet static DEFINE_MUTEX(dev_pm_qos_mtx);
470f703069SRafael J. Wysocki static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
481a9a9152SRafael J. Wysocki 
491a9a9152SRafael J. Wysocki /**
50ae0fb4b7SRafael J. Wysocki  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
51ae0fb4b7SRafael J. Wysocki  * @dev: Device to check the PM QoS flags for.
52ae0fb4b7SRafael J. Wysocki  * @mask: Flags to check against.
53ae0fb4b7SRafael J. Wysocki  *
54ae0fb4b7SRafael J. Wysocki  * This routine must be called with dev->power.lock held.
55ae0fb4b7SRafael J. Wysocki  */
56ae0fb4b7SRafael J. Wysocki enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
57ae0fb4b7SRafael J. Wysocki {
58ae0fb4b7SRafael J. Wysocki 	struct dev_pm_qos *qos = dev->power.qos;
59ae0fb4b7SRafael J. Wysocki 	struct pm_qos_flags *pqf;
60ae0fb4b7SRafael J. Wysocki 	s32 val;
61ae0fb4b7SRafael J. Wysocki 
62f90b8ad8SKrzysztof Kozlowski 	lockdep_assert_held(&dev->power.lock);
63f90b8ad8SKrzysztof Kozlowski 
6437530f2bSRafael J. Wysocki 	if (IS_ERR_OR_NULL(qos))
65ae0fb4b7SRafael J. Wysocki 		return PM_QOS_FLAGS_UNDEFINED;
66ae0fb4b7SRafael J. Wysocki 
67ae0fb4b7SRafael J. Wysocki 	pqf = &qos->flags;
68ae0fb4b7SRafael J. Wysocki 	if (list_empty(&pqf->list))
69ae0fb4b7SRafael J. Wysocki 		return PM_QOS_FLAGS_UNDEFINED;
70ae0fb4b7SRafael J. Wysocki 
71ae0fb4b7SRafael J. Wysocki 	val = pqf->effective_flags & mask;
72ae0fb4b7SRafael J. Wysocki 	if (val)
73ae0fb4b7SRafael J. Wysocki 		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
74ae0fb4b7SRafael J. Wysocki 
75ae0fb4b7SRafael J. Wysocki 	return PM_QOS_FLAGS_NONE;
76ae0fb4b7SRafael J. Wysocki }
77ae0fb4b7SRafael J. Wysocki 
78ae0fb4b7SRafael J. Wysocki /**
79ae0fb4b7SRafael J. Wysocki  * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
80ae0fb4b7SRafael J. Wysocki  * @dev: Device to check the PM QoS flags for.
81ae0fb4b7SRafael J. Wysocki  * @mask: Flags to check against.
82ae0fb4b7SRafael J. Wysocki  */
83ae0fb4b7SRafael J. Wysocki enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
84ae0fb4b7SRafael J. Wysocki {
85ae0fb4b7SRafael J. Wysocki 	unsigned long irqflags;
86ae0fb4b7SRafael J. Wysocki 	enum pm_qos_flags_status ret;
87ae0fb4b7SRafael J. Wysocki 
88ae0fb4b7SRafael J. Wysocki 	spin_lock_irqsave(&dev->power.lock, irqflags);
89ae0fb4b7SRafael J. Wysocki 	ret = __dev_pm_qos_flags(dev, mask);
90ae0fb4b7SRafael J. Wysocki 	spin_unlock_irqrestore(&dev->power.lock, irqflags);
91ae0fb4b7SRafael J. Wysocki 
92ae0fb4b7SRafael J. Wysocki 	return ret;
93ae0fb4b7SRafael J. Wysocki }
946802771bSLan Tianyu EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
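/*
 * Usage sketch: platform or power-domain code can check whether any request
 * asks for a given flag before cutting device power (PM_QOS_FLAG_NO_POWER_OFF
 * comes from <linux/pm_qos.h>, "dev" is assumed to be valid):
 *
 *    if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) > PM_QOS_FLAGS_NONE)
 *        // at least one request wants power kept on, avoid powering off
 */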
95ae0fb4b7SRafael J. Wysocki 
96ae0fb4b7SRafael J. Wysocki /**
9700dc9ad1SRafael J. Wysocki  * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
9800dc9ad1SRafael J. Wysocki  * @dev: Device to get the PM QoS constraint value for.
9900dc9ad1SRafael J. Wysocki  *
10000dc9ad1SRafael J. Wysocki  * This routine must be called with dev->power.lock held.
10100dc9ad1SRafael J. Wysocki  */
10200dc9ad1SRafael J. Wysocki s32 __dev_pm_qos_read_value(struct device *dev)
10300dc9ad1SRafael J. Wysocki {
104f90b8ad8SKrzysztof Kozlowski 	lockdep_assert_held(&dev->power.lock);
105f90b8ad8SKrzysztof Kozlowski 
1066dbf5ceaSRafael J. Wysocki 	return dev_pm_qos_raw_read_value(dev);
10700dc9ad1SRafael J. Wysocki }
10800dc9ad1SRafael J. Wysocki 
10900dc9ad1SRafael J. Wysocki /**
11000dc9ad1SRafael J. Wysocki  * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
1111a9a9152SRafael J. Wysocki  * @dev: Device to get the PM QoS constraint value for.
1121a9a9152SRafael J. Wysocki  */
1131a9a9152SRafael J. Wysocki s32 dev_pm_qos_read_value(struct device *dev)
1141a9a9152SRafael J. Wysocki {
1151a9a9152SRafael J. Wysocki 	unsigned long flags;
11600dc9ad1SRafael J. Wysocki 	s32 ret;
1171a9a9152SRafael J. Wysocki 
1181a9a9152SRafael J. Wysocki 	spin_lock_irqsave(&dev->power.lock, flags);
11900dc9ad1SRafael J. Wysocki 	ret = __dev_pm_qos_read_value(dev);
1201a9a9152SRafael J. Wysocki 	spin_unlock_irqrestore(&dev->power.lock, flags);
1211a9a9152SRafael J. Wysocki 
1221a9a9152SRafael J. Wysocki 	return ret;
1231a9a9152SRafael J. Wysocki }
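/*
 * Usage sketch: PM core or governor code can compare the aggregated resume
 * latency constraint against an expected wakeup latency ("resume_latency_us"
 * is a hypothetical, caller-provided estimate in microseconds):
 *
 *    s32 constraint = dev_pm_qos_read_value(dev);
 *
 *    if (resume_latency_us > constraint)
 *        // suspending the device would violate the constraint
 */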
1241a9a9152SRafael J. Wysocki 
125ae0fb4b7SRafael J. Wysocki /**
126ae0fb4b7SRafael J. Wysocki  * apply_constraint - Add/modify/remove device PM QoS request.
127ae0fb4b7SRafael J. Wysocki  * @req: Constraint request to apply
128ae0fb4b7SRafael J. Wysocki  * @action: Action to perform (add/update/remove).
129ae0fb4b7SRafael J. Wysocki  * @value: Value to assign to the QoS request.
130b66213cdSJean Pihet  *
131b66213cdSJean Pihet  * Internal function to update the constraints list using the PM QoS core
132d08d1b27SViresh Kumar  * code and if needed call the per-device callbacks.
133b66213cdSJean Pihet  */
134b66213cdSJean Pihet static int apply_constraint(struct dev_pm_qos_request *req,
135ae0fb4b7SRafael J. Wysocki 			    enum pm_qos_req_action action, s32 value)
136b66213cdSJean Pihet {
137ae0fb4b7SRafael J. Wysocki 	struct dev_pm_qos *qos = req->dev->power.qos;
138ae0fb4b7SRafael J. Wysocki 	int ret;
139b66213cdSJean Pihet 
140ae0fb4b7SRafael J. Wysocki 	switch(req->type) {
141b02f6695SRafael J. Wysocki 	case DEV_PM_QOS_RESUME_LATENCY:
142b02f6695SRafael J. Wysocki 		ret = pm_qos_update_target(&qos->resume_latency,
143b02f6695SRafael J. Wysocki 					   &req->data.pnode, action, value);
144ae0fb4b7SRafael J. Wysocki 		break;
1452d984ad1SRafael J. Wysocki 	case DEV_PM_QOS_LATENCY_TOLERANCE:
1462d984ad1SRafael J. Wysocki 		ret = pm_qos_update_target(&qos->latency_tolerance,
1472d984ad1SRafael J. Wysocki 					   &req->data.pnode, action, value);
1482d984ad1SRafael J. Wysocki 		if (ret) {
1492d984ad1SRafael J. Wysocki 			value = pm_qos_read_value(&qos->latency_tolerance);
1502d984ad1SRafael J. Wysocki 			req->dev->power.set_latency_tolerance(req->dev, value);
1512d984ad1SRafael J. Wysocki 		}
1522d984ad1SRafael J. Wysocki 		break;
153ae0fb4b7SRafael J. Wysocki 	case DEV_PM_QOS_FLAGS:
154ae0fb4b7SRafael J. Wysocki 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
155ae0fb4b7SRafael J. Wysocki 					  action, value);
156ae0fb4b7SRafael J. Wysocki 		break;
157ae0fb4b7SRafael J. Wysocki 	default:
158ae0fb4b7SRafael J. Wysocki 		ret = -EINVAL;
159ae0fb4b7SRafael J. Wysocki 	}
160b66213cdSJean Pihet 
161b66213cdSJean Pihet 	return ret;
162b66213cdSJean Pihet }
16391ff4cb8SJean Pihet 
16491ff4cb8SJean Pihet /*
16591ff4cb8SJean Pihet  * dev_pm_qos_constraints_allocate
16691ff4cb8SJean Pihet  * @dev: device to allocate data for
16791ff4cb8SJean Pihet  *
16891ff4cb8SJean Pihet  * Called at the first call to add_request, for constraint data allocation.
16991ff4cb8SJean Pihet  * Must be called with the dev_pm_qos_mtx mutex held.
17091ff4cb8SJean Pihet  */
17191ff4cb8SJean Pihet static int dev_pm_qos_constraints_allocate(struct device *dev)
17291ff4cb8SJean Pihet {
1735f986c59SRafael J. Wysocki 	struct dev_pm_qos *qos;
17491ff4cb8SJean Pihet 	struct pm_qos_constraints *c;
17591ff4cb8SJean Pihet 	struct blocking_notifier_head *n;
17691ff4cb8SJean Pihet 
1775f986c59SRafael J. Wysocki 	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
1785f986c59SRafael J. Wysocki 	if (!qos)
17991ff4cb8SJean Pihet 		return -ENOMEM;
18091ff4cb8SJean Pihet 
18191ff4cb8SJean Pihet 	n = kzalloc(sizeof(*n), GFP_KERNEL);
18291ff4cb8SJean Pihet 	if (!n) {
1835f986c59SRafael J. Wysocki 		kfree(qos);
18491ff4cb8SJean Pihet 		return -ENOMEM;
18591ff4cb8SJean Pihet 	}
18691ff4cb8SJean Pihet 	BLOCKING_INIT_NOTIFIER_HEAD(n);
18791ff4cb8SJean Pihet 
188b02f6695SRafael J. Wysocki 	c = &qos->resume_latency;
1891a9a9152SRafael J. Wysocki 	plist_head_init(&c->list);
190b02f6695SRafael J. Wysocki 	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
191b02f6695SRafael J. Wysocki 	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
192327adaedSRafael J. Wysocki 	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
1931a9a9152SRafael J. Wysocki 	c->type = PM_QOS_MIN;
1941a9a9152SRafael J. Wysocki 	c->notifiers = n;
1951a9a9152SRafael J. Wysocki 
1962d984ad1SRafael J. Wysocki 	c = &qos->latency_tolerance;
1972d984ad1SRafael J. Wysocki 	plist_head_init(&c->list);
1982d984ad1SRafael J. Wysocki 	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
1992d984ad1SRafael J. Wysocki 	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
2002d984ad1SRafael J. Wysocki 	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
2012d984ad1SRafael J. Wysocki 	c->type = PM_QOS_MIN;
2022d984ad1SRafael J. Wysocki 
203ae0fb4b7SRafael J. Wysocki 	INIT_LIST_HEAD(&qos->flags.list);
204ae0fb4b7SRafael J. Wysocki 
2051a9a9152SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
2065f986c59SRafael J. Wysocki 	dev->power.qos = qos;
2071a9a9152SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
20891ff4cb8SJean Pihet 
20991ff4cb8SJean Pihet 	return 0;
21091ff4cb8SJean Pihet }
21191ff4cb8SJean Pihet 
21237530f2bSRafael J. Wysocki static void __dev_pm_qos_hide_latency_limit(struct device *dev);
21337530f2bSRafael J. Wysocki static void __dev_pm_qos_hide_flags(struct device *dev);
21491ff4cb8SJean Pihet 
21591ff4cb8SJean Pihet /**
21691ff4cb8SJean Pihet  * dev_pm_qos_constraints_destroy
21791ff4cb8SJean Pihet  * @dev: target device
21891ff4cb8SJean Pihet  *
2191a9a9152SRafael J. Wysocki  * Called from the device PM subsystem on device removal under device_pm_lock().
22091ff4cb8SJean Pihet  */
22191ff4cb8SJean Pihet void dev_pm_qos_constraints_destroy(struct device *dev)
22291ff4cb8SJean Pihet {
2235f986c59SRafael J. Wysocki 	struct dev_pm_qos *qos;
22491ff4cb8SJean Pihet 	struct dev_pm_qos_request *req, *tmp;
2251a9a9152SRafael J. Wysocki 	struct pm_qos_constraints *c;
22635546bd4SRafael J. Wysocki 	struct pm_qos_flags *f;
22791ff4cb8SJean Pihet 
2280f703069SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_sysfs_mtx);
22937530f2bSRafael J. Wysocki 
23085dc0b8aSRafael J. Wysocki 	/*
23135546bd4SRafael J. Wysocki 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
23235546bd4SRafael J. Wysocki 	 * exposed to user space, they have to be hidden at this point.
23385dc0b8aSRafael J. Wysocki 	 */
234b02f6695SRafael J. Wysocki 	pm_qos_sysfs_remove_resume_latency(dev);
2350f703069SRafael J. Wysocki 	pm_qos_sysfs_remove_flags(dev);
2360f703069SRafael J. Wysocki 
2370f703069SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
2380f703069SRafael J. Wysocki 
23937530f2bSRafael J. Wysocki 	__dev_pm_qos_hide_latency_limit(dev);
24037530f2bSRafael J. Wysocki 	__dev_pm_qos_hide_flags(dev);
24185dc0b8aSRafael J. Wysocki 
2425f986c59SRafael J. Wysocki 	qos = dev->power.qos;
2435f986c59SRafael J. Wysocki 	if (!qos)
2441a9a9152SRafael J. Wysocki 		goto out;
2451a9a9152SRafael J. Wysocki 
24635546bd4SRafael J. Wysocki 	/* Flush the constraints lists for the device. */
247b02f6695SRafael J. Wysocki 	c = &qos->resume_latency;
248021c870bSRafael J. Wysocki 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
24991ff4cb8SJean Pihet 		/*
250b66213cdSJean Pihet 		 * Update constraints list and call the notification
25191ff4cb8SJean Pihet 		 * callbacks if needed
25291ff4cb8SJean Pihet 		 */
2531a9a9152SRafael J. Wysocki 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
25491ff4cb8SJean Pihet 		memset(req, 0, sizeof(*req));
25591ff4cb8SJean Pihet 	}
2562d984ad1SRafael J. Wysocki 	c = &qos->latency_tolerance;
2572d984ad1SRafael J. Wysocki 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
2582d984ad1SRafael J. Wysocki 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
2592d984ad1SRafael J. Wysocki 		memset(req, 0, sizeof(*req));
2602d984ad1SRafael J. Wysocki 	}
26135546bd4SRafael J. Wysocki 	f = &qos->flags;
26235546bd4SRafael J. Wysocki 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
26335546bd4SRafael J. Wysocki 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
26435546bd4SRafael J. Wysocki 		memset(req, 0, sizeof(*req));
26535546bd4SRafael J. Wysocki 	}
26691ff4cb8SJean Pihet 
2671a9a9152SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
26837530f2bSRafael J. Wysocki 	dev->power.qos = ERR_PTR(-ENODEV);
2691a9a9152SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
27091ff4cb8SJean Pihet 
271e84b4a84SJohn Keeping 	kfree(qos->resume_latency.notifiers);
2729eaee2cdSLan,Tianyu 	kfree(qos);
2731a9a9152SRafael J. Wysocki 
2741a9a9152SRafael J. Wysocki  out:
27591ff4cb8SJean Pihet 	mutex_unlock(&dev_pm_qos_mtx);
2760f703069SRafael J. Wysocki 
2770f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
27891ff4cb8SJean Pihet }
27991ff4cb8SJean Pihet 
28041ba8bd0SJan H. Schönherr static bool dev_pm_qos_invalid_req_type(struct device *dev,
28141ba8bd0SJan H. Schönherr 					enum dev_pm_qos_req_type type)
2822d984ad1SRafael J. Wysocki {
28341ba8bd0SJan H. Schönherr 	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
28441ba8bd0SJan H. Schönherr 	       !dev->power.set_latency_tolerance;
2852d984ad1SRafael J. Wysocki }
2862d984ad1SRafael J. Wysocki 
2872d984ad1SRafael J. Wysocki static int __dev_pm_qos_add_request(struct device *dev,
2882d984ad1SRafael J. Wysocki 				    struct dev_pm_qos_request *req,
2892d984ad1SRafael J. Wysocki 				    enum dev_pm_qos_req_type type, s32 value)
2902d984ad1SRafael J. Wysocki {
2912d984ad1SRafael J. Wysocki 	int ret = 0;
2922d984ad1SRafael J. Wysocki 
29341ba8bd0SJan H. Schönherr 	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
2942d984ad1SRafael J. Wysocki 		return -EINVAL;
2952d984ad1SRafael J. Wysocki 
2962d984ad1SRafael J. Wysocki 	if (WARN(dev_pm_qos_request_active(req),
2972d984ad1SRafael J. Wysocki 		 "%s() called for already added request\n", __func__))
2982d984ad1SRafael J. Wysocki 		return -EINVAL;
2992d984ad1SRafael J. Wysocki 
3002d984ad1SRafael J. Wysocki 	if (IS_ERR(dev->power.qos))
3012d984ad1SRafael J. Wysocki 		ret = -ENODEV;
3022d984ad1SRafael J. Wysocki 	else if (!dev->power.qos)
3032d984ad1SRafael J. Wysocki 		ret = dev_pm_qos_constraints_allocate(dev);
3042d984ad1SRafael J. Wysocki 
3052d984ad1SRafael J. Wysocki 	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
3062d984ad1SRafael J. Wysocki 	if (!ret) {
3072d984ad1SRafael J. Wysocki 		req->dev = dev;
3082d984ad1SRafael J. Wysocki 		req->type = type;
3092d984ad1SRafael J. Wysocki 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
3102d984ad1SRafael J. Wysocki 	}
3112d984ad1SRafael J. Wysocki 	return ret;
3122d984ad1SRafael J. Wysocki }
3132d984ad1SRafael J. Wysocki 
31491ff4cb8SJean Pihet /**
31591ff4cb8SJean Pihet  * dev_pm_qos_add_request - inserts new qos request into the list
31691ff4cb8SJean Pihet  * @dev: target device for the constraint
31791ff4cb8SJean Pihet  * @req: pointer to a preallocated handle
318ae0fb4b7SRafael J. Wysocki  * @type: type of the request
31991ff4cb8SJean Pihet  * @value: defines the qos request
32091ff4cb8SJean Pihet  *
32191ff4cb8SJean Pihet  * This function inserts a new entry in the device constraints list of
32291ff4cb8SJean Pihet  * requested QoS performance characteristics. It recomputes the aggregate
32391ff4cb8SJean Pihet  * QoS value of the constraint and initializes the dev_pm_qos_request
32491ff4cb8SJean Pihet  * handle.  Caller needs to save this handle for later use in updates and
32591ff4cb8SJean Pihet  * removal.
32691ff4cb8SJean Pihet  *
32791ff4cb8SJean Pihet  * Returns 1 if the aggregated constraint value has changed,
32891ff4cb8SJean Pihet  * 0 if the aggregated constraint value has not changed,
3291a9a9152SRafael J. Wysocki  * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
3301a9a9152SRafael J. Wysocki  * to allocate for data structures, -ENODEV if the device has just been removed
3311a9a9152SRafael J. Wysocki  * from the system.
332436ede89SRafael J. Wysocki  *
333436ede89SRafael J. Wysocki  * Callers should ensure that the target device is not RPM_SUSPENDED before
334436ede89SRafael J. Wysocki  * using this function for requests of type DEV_PM_QOS_FLAGS.
33591ff4cb8SJean Pihet  */
33691ff4cb8SJean Pihet int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
337ae0fb4b7SRafael J. Wysocki 			   enum dev_pm_qos_req_type type, s32 value)
33891ff4cb8SJean Pihet {
3392d984ad1SRafael J. Wysocki 	int ret;
34091ff4cb8SJean Pihet 
3411a9a9152SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
3422d984ad1SRafael J. Wysocki 	ret = __dev_pm_qos_add_request(dev, req, type, value);
34391ff4cb8SJean Pihet 	mutex_unlock(&dev_pm_qos_mtx);
34491ff4cb8SJean Pihet 	return ret;
34591ff4cb8SJean Pihet }
34691ff4cb8SJean Pihet EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
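/*
 * Usage sketch (hypothetical "foo" driver): the request object must remain
 * valid for as long as the request is registered, so it normally lives in the
 * driver's private data; only a negative return value indicates failure.
 *
 *    ret = dev_pm_qos_add_request(dev, &foo->latency_req,
 *                                 DEV_PM_QOS_RESUME_LATENCY, 200);
 *    if (ret < 0)
 *        return ret;
 */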
34791ff4cb8SJean Pihet 
34891ff4cb8SJean Pihet /**
349e39473d0SRafael J. Wysocki  * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
350e39473d0SRafael J. Wysocki  * @req : PM QoS request to modify.
351e39473d0SRafael J. Wysocki  * @new_value: New value to request.
352e39473d0SRafael J. Wysocki  */
353e39473d0SRafael J. Wysocki static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
354e39473d0SRafael J. Wysocki 				       s32 new_value)
355e39473d0SRafael J. Wysocki {
356e39473d0SRafael J. Wysocki 	s32 curr_value;
357e39473d0SRafael J. Wysocki 	int ret = 0;
358e39473d0SRafael J. Wysocki 
359b81ea1b5SRafael J. Wysocki 	if (!req) /* guard against callers passing in null */
360b81ea1b5SRafael J. Wysocki 		return -EINVAL;
361b81ea1b5SRafael J. Wysocki 
362b81ea1b5SRafael J. Wysocki 	if (WARN(!dev_pm_qos_request_active(req),
363b81ea1b5SRafael J. Wysocki 		 "%s() called for unknown object\n", __func__))
364b81ea1b5SRafael J. Wysocki 		return -EINVAL;
365b81ea1b5SRafael J. Wysocki 
36637530f2bSRafael J. Wysocki 	if (IS_ERR_OR_NULL(req->dev->power.qos))
367e39473d0SRafael J. Wysocki 		return -ENODEV;
368e39473d0SRafael J. Wysocki 
369e39473d0SRafael J. Wysocki 	switch(req->type) {
370b02f6695SRafael J. Wysocki 	case DEV_PM_QOS_RESUME_LATENCY:
3712d984ad1SRafael J. Wysocki 	case DEV_PM_QOS_LATENCY_TOLERANCE:
372e39473d0SRafael J. Wysocki 		curr_value = req->data.pnode.prio;
373e39473d0SRafael J. Wysocki 		break;
374e39473d0SRafael J. Wysocki 	case DEV_PM_QOS_FLAGS:
375e39473d0SRafael J. Wysocki 		curr_value = req->data.flr.flags;
376e39473d0SRafael J. Wysocki 		break;
377e39473d0SRafael J. Wysocki 	default:
378e39473d0SRafael J. Wysocki 		return -EINVAL;
379e39473d0SRafael J. Wysocki 	}
380e39473d0SRafael J. Wysocki 
38196d9d0b5SSahara 	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
38296d9d0b5SSahara 					new_value);
383e39473d0SRafael J. Wysocki 	if (curr_value != new_value)
384e39473d0SRafael J. Wysocki 		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
385e39473d0SRafael J. Wysocki 
386e39473d0SRafael J. Wysocki 	return ret;
387e39473d0SRafael J. Wysocki }
388e39473d0SRafael J. Wysocki 
389e39473d0SRafael J. Wysocki /**
39091ff4cb8SJean Pihet  * dev_pm_qos_update_request - modifies an existing qos request
39191ff4cb8SJean Pihet  * @req : handle to list element holding a dev_pm_qos request to use
39291ff4cb8SJean Pihet  * @new_value: defines the qos request
39391ff4cb8SJean Pihet  *
39491ff4cb8SJean Pihet  * Updates an existing dev PM qos request along with updating the
39591ff4cb8SJean Pihet  * target value.
39691ff4cb8SJean Pihet  *
39791ff4cb8SJean Pihet  * Attempts are made to make this code callable on hot code paths.
39891ff4cb8SJean Pihet  *
39991ff4cb8SJean Pihet  * Returns 1 if the aggregated constraint value has changed,
40091ff4cb8SJean Pihet  * 0 if the aggregated constraint value has not changed,
40191ff4cb8SJean Pihet  * -EINVAL in case of wrong parameters, -ENODEV if the device has been
40291ff4cb8SJean Pihet  * removed from the system
403436ede89SRafael J. Wysocki  *
404436ede89SRafael J. Wysocki  * Callers should ensure that the target device is not RPM_SUSPENDED before
405436ede89SRafael J. Wysocki  * using this function for requests of type DEV_PM_QOS_FLAGS.
40691ff4cb8SJean Pihet  */
407e39473d0SRafael J. Wysocki int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
40891ff4cb8SJean Pihet {
409e39473d0SRafael J. Wysocki 	int ret;
41091ff4cb8SJean Pihet 
411b81ea1b5SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
412b81ea1b5SRafael J. Wysocki 	ret = __dev_pm_qos_update_request(req, new_value);
413b81ea1b5SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
414b81ea1b5SRafael J. Wysocki 	return ret;
415b81ea1b5SRafael J. Wysocki }
416b81ea1b5SRafael J. Wysocki EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
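/*
 * Usage sketch: tighten or relax the request added above, e.g. when the
 * hypothetical foo device enters or leaves a low-latency streaming mode:
 *
 *    dev_pm_qos_update_request(&foo->latency_req, streaming ? 50 : 500);
 */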
417b81ea1b5SRafael J. Wysocki 
418b81ea1b5SRafael J. Wysocki static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
419b81ea1b5SRafael J. Wysocki {
42037530f2bSRafael J. Wysocki 	int ret;
421b81ea1b5SRafael J. Wysocki 
42291ff4cb8SJean Pihet 	if (!req) /* guard against callers passing in null */
42391ff4cb8SJean Pihet 		return -EINVAL;
42491ff4cb8SJean Pihet 
425af4c720eSGuennadi Liakhovetski 	if (WARN(!dev_pm_qos_request_active(req),
426af4c720eSGuennadi Liakhovetski 		 "%s() called for unknown object\n", __func__))
42791ff4cb8SJean Pihet 		return -EINVAL;
42891ff4cb8SJean Pihet 
42937530f2bSRafael J. Wysocki 	if (IS_ERR_OR_NULL(req->dev->power.qos))
43037530f2bSRafael J. Wysocki 		return -ENODEV;
43137530f2bSRafael J. Wysocki 
43296d9d0b5SSahara 	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
43396d9d0b5SSahara 					PM_QOS_DEFAULT_VALUE);
43437530f2bSRafael J. Wysocki 	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
435b81ea1b5SRafael J. Wysocki 	memset(req, 0, sizeof(*req));
43691ff4cb8SJean Pihet 	return ret;
43791ff4cb8SJean Pihet }
43891ff4cb8SJean Pihet 
43991ff4cb8SJean Pihet /**
44091ff4cb8SJean Pihet  * dev_pm_qos_remove_request - removes an existing qos request
44191ff4cb8SJean Pihet  * @req: handle to request list element
44291ff4cb8SJean Pihet  *
44391ff4cb8SJean Pihet  * Will remove pm qos request from the list of constraints and
44491ff4cb8SJean Pihet  * recompute the current target value. Call this on slow code paths.
44591ff4cb8SJean Pihet  *
44691ff4cb8SJean Pihet  * Returns 1 if the aggregated constraint value has changed,
44791ff4cb8SJean Pihet  * 0 if the aggregated constraint value has not changed,
44891ff4cb8SJean Pihet  * -EINVAL in case of wrong parameters, -ENODEV if the device has been
44991ff4cb8SJean Pihet  * removed from the system
450436ede89SRafael J. Wysocki  *
451436ede89SRafael J. Wysocki  * Callers should ensure that the target device is not RPM_SUSPENDED before
452436ede89SRafael J. Wysocki  * using this function for requests of type DEV_PM_QOS_FLAGS.
45391ff4cb8SJean Pihet  */
45491ff4cb8SJean Pihet int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
45591ff4cb8SJean Pihet {
456b81ea1b5SRafael J. Wysocki 	int ret;
45791ff4cb8SJean Pihet 
45891ff4cb8SJean Pihet 	mutex_lock(&dev_pm_qos_mtx);
459b81ea1b5SRafael J. Wysocki 	ret = __dev_pm_qos_remove_request(req);
46091ff4cb8SJean Pihet 	mutex_unlock(&dev_pm_qos_mtx);
46191ff4cb8SJean Pihet 	return ret;
46291ff4cb8SJean Pihet }
46391ff4cb8SJean Pihet EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
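/*
 * Usage sketch: in the hypothetical foo driver's remove (or probe error)
 * path, drop the request so the aggregate constraint is recomputed:
 *
 *    dev_pm_qos_remove_request(&foo->latency_req);
 */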
46491ff4cb8SJean Pihet 
46591ff4cb8SJean Pihet /**
46691ff4cb8SJean Pihet  * dev_pm_qos_add_notifier - sets notification entry for changes to target value
46791ff4cb8SJean Pihet  * of per-device PM QoS constraints
46891ff4cb8SJean Pihet  *
46991ff4cb8SJean Pihet  * @dev: target device for the constraint
47091ff4cb8SJean Pihet  * @notifier: notifier block managed by caller.
47191ff4cb8SJean Pihet  *
47291ff4cb8SJean Pihet  * Will register the notifier into a notification chain that gets called
47391ff4cb8SJean Pihet  * upon changes to the target value for the device.
47423e0fc5aSRafael J. Wysocki  *
47523e0fc5aSRafael J. Wysocki  * If the device's constraints object doesn't exist when this routine is called,
47623e0fc5aSRafael J. Wysocki  * it will be created (or an error code will be returned if that fails).
47791ff4cb8SJean Pihet  */
47891ff4cb8SJean Pihet int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
47991ff4cb8SJean Pihet {
48023e0fc5aSRafael J. Wysocki 	int ret = 0;
48191ff4cb8SJean Pihet 
48291ff4cb8SJean Pihet 	mutex_lock(&dev_pm_qos_mtx);
48391ff4cb8SJean Pihet 
48437530f2bSRafael J. Wysocki 	if (IS_ERR(dev->power.qos))
48537530f2bSRafael J. Wysocki 		ret = -ENODEV;
48637530f2bSRafael J. Wysocki 	else if (!dev->power.qos)
48737530f2bSRafael J. Wysocki 		ret = dev_pm_qos_constraints_allocate(dev);
48823e0fc5aSRafael J. Wysocki 
48923e0fc5aSRafael J. Wysocki 	if (!ret)
490b02f6695SRafael J. Wysocki 		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
491b02f6695SRafael J. Wysocki 						       notifier);
49291ff4cb8SJean Pihet 
49391ff4cb8SJean Pihet 	mutex_unlock(&dev_pm_qos_mtx);
49423e0fc5aSRafael J. Wysocki 	return ret;
49591ff4cb8SJean Pihet }
49691ff4cb8SJean Pihet EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
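/*
 * Usage sketch (hypothetical "foo" names): the callback runs from a blocking
 * notifier chain and receives the new aggregated resume latency target as the
 * unsigned long argument.
 *
 *    static int foo_qos_notify(struct notifier_block *nb,
 *                              unsigned long value, void *data)
 *    {
 *        // react to the new target value
 *        return NOTIFY_OK;
 *    }
 *
 *    static struct notifier_block foo_nb = {
 *        .notifier_call = foo_qos_notify,
 *    };
 *
 *    ret = dev_pm_qos_add_notifier(dev, &foo_nb);
 */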
49791ff4cb8SJean Pihet 
49891ff4cb8SJean Pihet /**
49991ff4cb8SJean Pihet  * dev_pm_qos_remove_notifier - deletes notification for changes to target value
50091ff4cb8SJean Pihet  * of per-device PM QoS constraints
50191ff4cb8SJean Pihet  *
50291ff4cb8SJean Pihet  * @dev: target device for the constraint
50391ff4cb8SJean Pihet  * @notifier: notifier block to be removed.
50491ff4cb8SJean Pihet  *
50591ff4cb8SJean Pihet  * Will remove the notifier from the notification chain that gets called
50691ff4cb8SJean Pihet  * upon changes to the target value.
50791ff4cb8SJean Pihet  */
50891ff4cb8SJean Pihet int dev_pm_qos_remove_notifier(struct device *dev,
50991ff4cb8SJean Pihet 			       struct notifier_block *notifier)
51091ff4cb8SJean Pihet {
51191ff4cb8SJean Pihet 	int retval = 0;
51291ff4cb8SJean Pihet 
51391ff4cb8SJean Pihet 	mutex_lock(&dev_pm_qos_mtx);
51491ff4cb8SJean Pihet 
5151a9a9152SRafael J. Wysocki 	/* Silently return if the constraints object is not present. */
51637530f2bSRafael J. Wysocki 	if (!IS_ERR_OR_NULL(dev->power.qos))
517b02f6695SRafael J. Wysocki 		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
51891ff4cb8SJean Pihet 							    notifier);
51991ff4cb8SJean Pihet 
52091ff4cb8SJean Pihet 	mutex_unlock(&dev_pm_qos_mtx);
52191ff4cb8SJean Pihet 	return retval;
52291ff4cb8SJean Pihet }
52391ff4cb8SJean Pihet EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
524b66213cdSJean Pihet 
525b66213cdSJean Pihet /**
52640a5f8beSRafael J. Wysocki  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
52740a5f8beSRafael J. Wysocki  * @dev: Device whose ancestor to add the request for.
52840a5f8beSRafael J. Wysocki  * @req: Pointer to the preallocated handle.
52971d821fdSRafael J. Wysocki  * @type: Type of the request.
53040a5f8beSRafael J. Wysocki  * @value: Constraint latency value.
53140a5f8beSRafael J. Wysocki  */
53240a5f8beSRafael J. Wysocki int dev_pm_qos_add_ancestor_request(struct device *dev,
53371d821fdSRafael J. Wysocki 				    struct dev_pm_qos_request *req,
53471d821fdSRafael J. Wysocki 				    enum dev_pm_qos_req_type type, s32 value)
53540a5f8beSRafael J. Wysocki {
53640a5f8beSRafael J. Wysocki 	struct device *ancestor = dev->parent;
5374ce47802SRafael J. Wysocki 	int ret = -ENODEV;
53840a5f8beSRafael J. Wysocki 
53971d821fdSRafael J. Wysocki 	switch (type) {
54071d821fdSRafael J. Wysocki 	case DEV_PM_QOS_RESUME_LATENCY:
54140a5f8beSRafael J. Wysocki 		while (ancestor && !ancestor->power.ignore_children)
54240a5f8beSRafael J. Wysocki 			ancestor = ancestor->parent;
54340a5f8beSRafael J. Wysocki 
54471d821fdSRafael J. Wysocki 		break;
54571d821fdSRafael J. Wysocki 	case DEV_PM_QOS_LATENCY_TOLERANCE:
54671d821fdSRafael J. Wysocki 		while (ancestor && !ancestor->power.set_latency_tolerance)
54771d821fdSRafael J. Wysocki 			ancestor = ancestor->parent;
54871d821fdSRafael J. Wysocki 
54971d821fdSRafael J. Wysocki 		break;
55071d821fdSRafael J. Wysocki 	default:
55171d821fdSRafael J. Wysocki 		ancestor = NULL;
55271d821fdSRafael J. Wysocki 	}
55340a5f8beSRafael J. Wysocki 	if (ancestor)
55471d821fdSRafael J. Wysocki 		ret = dev_pm_qos_add_request(ancestor, req, type, value);
55540a5f8beSRafael J. Wysocki 
5564ce47802SRafael J. Wysocki 	if (ret < 0)
55740a5f8beSRafael J. Wysocki 		req->dev = NULL;
55840a5f8beSRafael J. Wysocki 
5594ce47802SRafael J. Wysocki 	return ret;
56040a5f8beSRafael J. Wysocki }
56140a5f8beSRafael J. Wysocki EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
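/*
 * Usage sketch: a device whose controller runtime-suspends with
 * ignore_children set (an I2C client is a typical case) can constrain the
 * first such ancestor instead of itself ("client" and "foo" are hypothetical):
 *
 *    ret = dev_pm_qos_add_ancestor_request(&client->dev, &foo->ancestor_req,
 *                                          DEV_PM_QOS_RESUME_LATENCY, 100);
 */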
56285dc0b8aSRafael J. Wysocki 
563e39473d0SRafael J. Wysocki static void __dev_pm_qos_drop_user_request(struct device *dev,
564e39473d0SRafael J. Wysocki 					   enum dev_pm_qos_req_type type)
56585dc0b8aSRafael J. Wysocki {
566b81ea1b5SRafael J. Wysocki 	struct dev_pm_qos_request *req = NULL;
567b81ea1b5SRafael J. Wysocki 
568e39473d0SRafael J. Wysocki 	switch(type) {
569b02f6695SRafael J. Wysocki 	case DEV_PM_QOS_RESUME_LATENCY:
570b02f6695SRafael J. Wysocki 		req = dev->power.qos->resume_latency_req;
571b02f6695SRafael J. Wysocki 		dev->power.qos->resume_latency_req = NULL;
572e39473d0SRafael J. Wysocki 		break;
5732d984ad1SRafael J. Wysocki 	case DEV_PM_QOS_LATENCY_TOLERANCE:
5742d984ad1SRafael J. Wysocki 		req = dev->power.qos->latency_tolerance_req;
5752d984ad1SRafael J. Wysocki 		dev->power.qos->latency_tolerance_req = NULL;
5762d984ad1SRafael J. Wysocki 		break;
577e39473d0SRafael J. Wysocki 	case DEV_PM_QOS_FLAGS:
578b81ea1b5SRafael J. Wysocki 		req = dev->power.qos->flags_req;
579e39473d0SRafael J. Wysocki 		dev->power.qos->flags_req = NULL;
580e39473d0SRafael J. Wysocki 		break;
581e39473d0SRafael J. Wysocki 	}
582b81ea1b5SRafael J. Wysocki 	__dev_pm_qos_remove_request(req);
583b81ea1b5SRafael J. Wysocki 	kfree(req);
58485dc0b8aSRafael J. Wysocki }
58585dc0b8aSRafael J. Wysocki 
5860f703069SRafael J. Wysocki static void dev_pm_qos_drop_user_request(struct device *dev,
5870f703069SRafael J. Wysocki 					 enum dev_pm_qos_req_type type)
5880f703069SRafael J. Wysocki {
5890f703069SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
5900f703069SRafael J. Wysocki 	__dev_pm_qos_drop_user_request(dev, type);
5910f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
5920f703069SRafael J. Wysocki }
5930f703069SRafael J. Wysocki 
59485dc0b8aSRafael J. Wysocki /**
59585dc0b8aSRafael J. Wysocki  * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
59685dc0b8aSRafael J. Wysocki  * @dev: Device whose PM QoS latency limit is to be exposed to user space.
59785dc0b8aSRafael J. Wysocki  * @value: Initial value of the latency limit.
59885dc0b8aSRafael J. Wysocki  */
59985dc0b8aSRafael J. Wysocki int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
60085dc0b8aSRafael J. Wysocki {
60185dc0b8aSRafael J. Wysocki 	struct dev_pm_qos_request *req;
60285dc0b8aSRafael J. Wysocki 	int ret;
60385dc0b8aSRafael J. Wysocki 
60485dc0b8aSRafael J. Wysocki 	if (!device_is_registered(dev) || value < 0)
60585dc0b8aSRafael J. Wysocki 		return -EINVAL;
60685dc0b8aSRafael J. Wysocki 
60785dc0b8aSRafael J. Wysocki 	req = kzalloc(sizeof(*req), GFP_KERNEL);
60885dc0b8aSRafael J. Wysocki 	if (!req)
60985dc0b8aSRafael J. Wysocki 		return -ENOMEM;
61085dc0b8aSRafael J. Wysocki 
611b02f6695SRafael J. Wysocki 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
612b81ea1b5SRafael J. Wysocki 	if (ret < 0) {
613b81ea1b5SRafael J. Wysocki 		kfree(req);
61485dc0b8aSRafael J. Wysocki 		return ret;
615b81ea1b5SRafael J. Wysocki 	}
616b81ea1b5SRafael J. Wysocki 
6170f703069SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_sysfs_mtx);
6180f703069SRafael J. Wysocki 
619b81ea1b5SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
620b81ea1b5SRafael J. Wysocki 
62137530f2bSRafael J. Wysocki 	if (IS_ERR_OR_NULL(dev->power.qos))
622b81ea1b5SRafael J. Wysocki 		ret = -ENODEV;
623b02f6695SRafael J. Wysocki 	else if (dev->power.qos->resume_latency_req)
624b81ea1b5SRafael J. Wysocki 		ret = -EEXIST;
625b81ea1b5SRafael J. Wysocki 
626b81ea1b5SRafael J. Wysocki 	if (ret < 0) {
627b81ea1b5SRafael J. Wysocki 		__dev_pm_qos_remove_request(req);
628b81ea1b5SRafael J. Wysocki 		kfree(req);
6290f703069SRafael J. Wysocki 		mutex_unlock(&dev_pm_qos_mtx);
630b81ea1b5SRafael J. Wysocki 		goto out;
631b81ea1b5SRafael J. Wysocki 	}
632b02f6695SRafael J. Wysocki 	dev->power.qos->resume_latency_req = req;
6330f703069SRafael J. Wysocki 
6340f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
6350f703069SRafael J. Wysocki 
636b02f6695SRafael J. Wysocki 	ret = pm_qos_sysfs_add_resume_latency(dev);
63785dc0b8aSRafael J. Wysocki 	if (ret)
638b02f6695SRafael J. Wysocki 		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
63985dc0b8aSRafael J. Wysocki 
640b81ea1b5SRafael J. Wysocki  out:
6410f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
64285dc0b8aSRafael J. Wysocki 	return ret;
64385dc0b8aSRafael J. Wysocki }
64485dc0b8aSRafael J. Wysocki EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
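/*
 * Usage sketch: a driver can make the limit user-tunable from its probe path;
 * this creates the resume latency attribute under the device's power/
 * directory in sysfs (the initial 1000 us value is arbitrary):
 *
 *    ret = dev_pm_qos_expose_latency_limit(dev, 1000);
 */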
64585dc0b8aSRafael J. Wysocki 
64637530f2bSRafael J. Wysocki static void __dev_pm_qos_hide_latency_limit(struct device *dev)
64737530f2bSRafael J. Wysocki {
648b02f6695SRafael J. Wysocki 	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
649b02f6695SRafael J. Wysocki 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
65037530f2bSRafael J. Wysocki }
65137530f2bSRafael J. Wysocki 
65285dc0b8aSRafael J. Wysocki /**
65385dc0b8aSRafael J. Wysocki  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
65485dc0b8aSRafael J. Wysocki  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
65585dc0b8aSRafael J. Wysocki  */
65685dc0b8aSRafael J. Wysocki void dev_pm_qos_hide_latency_limit(struct device *dev)
65785dc0b8aSRafael J. Wysocki {
6580f703069SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_sysfs_mtx);
6590f703069SRafael J. Wysocki 
660b02f6695SRafael J. Wysocki 	pm_qos_sysfs_remove_resume_latency(dev);
6610f703069SRafael J. Wysocki 
662b81ea1b5SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
66337530f2bSRafael J. Wysocki 	__dev_pm_qos_hide_latency_limit(dev);
664b81ea1b5SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
6650f703069SRafael J. Wysocki 
6660f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
66785dc0b8aSRafael J. Wysocki }
66885dc0b8aSRafael J. Wysocki EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
669e39473d0SRafael J. Wysocki 
670e39473d0SRafael J. Wysocki /**
671e39473d0SRafael J. Wysocki  * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
672e39473d0SRafael J. Wysocki  * @dev: Device whose PM QoS flags are to be exposed to user space.
673e39473d0SRafael J. Wysocki  * @val: Initial values of the flags.
674e39473d0SRafael J. Wysocki  */
675e39473d0SRafael J. Wysocki int dev_pm_qos_expose_flags(struct device *dev, s32 val)
676e39473d0SRafael J. Wysocki {
677e39473d0SRafael J. Wysocki 	struct dev_pm_qos_request *req;
678e39473d0SRafael J. Wysocki 	int ret;
679e39473d0SRafael J. Wysocki 
680e39473d0SRafael J. Wysocki 	if (!device_is_registered(dev))
681e39473d0SRafael J. Wysocki 		return -EINVAL;
682e39473d0SRafael J. Wysocki 
683e39473d0SRafael J. Wysocki 	req = kzalloc(sizeof(*req), GFP_KERNEL);
684e39473d0SRafael J. Wysocki 	if (!req)
685e39473d0SRafael J. Wysocki 		return -ENOMEM;
686e39473d0SRafael J. Wysocki 
687e39473d0SRafael J. Wysocki 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
688b81ea1b5SRafael J. Wysocki 	if (ret < 0) {
689b81ea1b5SRafael J. Wysocki 		kfree(req);
690b81ea1b5SRafael J. Wysocki 		return ret;
691b81ea1b5SRafael J. Wysocki 	}
692b81ea1b5SRafael J. Wysocki 
693b81ea1b5SRafael J. Wysocki 	pm_runtime_get_sync(dev);
6940f703069SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_sysfs_mtx);
6950f703069SRafael J. Wysocki 
696b81ea1b5SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
697b81ea1b5SRafael J. Wysocki 
69837530f2bSRafael J. Wysocki 	if (IS_ERR_OR_NULL(dev->power.qos))
699b81ea1b5SRafael J. Wysocki 		ret = -ENODEV;
700b81ea1b5SRafael J. Wysocki 	else if (dev->power.qos->flags_req)
701b81ea1b5SRafael J. Wysocki 		ret = -EEXIST;
702b81ea1b5SRafael J. Wysocki 
703b81ea1b5SRafael J. Wysocki 	if (ret < 0) {
704b81ea1b5SRafael J. Wysocki 		__dev_pm_qos_remove_request(req);
705b81ea1b5SRafael J. Wysocki 		kfree(req);
7060f703069SRafael J. Wysocki 		mutex_unlock(&dev_pm_qos_mtx);
707b81ea1b5SRafael J. Wysocki 		goto out;
708b81ea1b5SRafael J. Wysocki 	}
709e39473d0SRafael J. Wysocki 	dev->power.qos->flags_req = req;
7100f703069SRafael J. Wysocki 
7110f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
7120f703069SRafael J. Wysocki 
713e39473d0SRafael J. Wysocki 	ret = pm_qos_sysfs_add_flags(dev);
714e39473d0SRafael J. Wysocki 	if (ret)
7150f703069SRafael J. Wysocki 		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
716e39473d0SRafael J. Wysocki 
717b81ea1b5SRafael J. Wysocki  out:
7180f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
7197e4d6844SLan Tianyu 	pm_runtime_put(dev);
720e39473d0SRafael J. Wysocki 	return ret;
721e39473d0SRafael J. Wysocki }
722e39473d0SRafael J. Wysocki EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
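/*
 * Usage sketch: expose the flags with PM_QOS_FLAG_NO_POWER_OFF set initially,
 * so user space may clear it later through sysfs:
 *
 *    ret = dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 */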
723e39473d0SRafael J. Wysocki 
72437530f2bSRafael J. Wysocki static void __dev_pm_qos_hide_flags(struct device *dev)
72537530f2bSRafael J. Wysocki {
7260f703069SRafael J. Wysocki 	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
72737530f2bSRafael J. Wysocki 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
72837530f2bSRafael J. Wysocki }
72937530f2bSRafael J. Wysocki 
730e39473d0SRafael J. Wysocki /**
731e39473d0SRafael J. Wysocki  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
732e39473d0SRafael J. Wysocki  * @dev: Device whose PM QoS flags are to be hidden from user space.
733e39473d0SRafael J. Wysocki  */
734e39473d0SRafael J. Wysocki void dev_pm_qos_hide_flags(struct device *dev)
735e39473d0SRafael J. Wysocki {
736b81ea1b5SRafael J. Wysocki 	pm_runtime_get_sync(dev);
7370f703069SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_sysfs_mtx);
7380f703069SRafael J. Wysocki 
7390f703069SRafael J. Wysocki 	pm_qos_sysfs_remove_flags(dev);
7400f703069SRafael J. Wysocki 
741b81ea1b5SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
74237530f2bSRafael J. Wysocki 	__dev_pm_qos_hide_flags(dev);
743b81ea1b5SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
7440f703069SRafael J. Wysocki 
7450f703069SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
746b81ea1b5SRafael J. Wysocki 	pm_runtime_put(dev);
747e39473d0SRafael J. Wysocki }
748e39473d0SRafael J. Wysocki EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
749e39473d0SRafael J. Wysocki 
750e39473d0SRafael J. Wysocki /**
751e39473d0SRafael J. Wysocki  * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
752e39473d0SRafael J. Wysocki  * @dev: Device to update the PM QoS flags request for.
753e39473d0SRafael J. Wysocki  * @mask: Flags to set/clear.
754e39473d0SRafael J. Wysocki  * @set: Whether to set or clear the flags (true means set).
755e39473d0SRafael J. Wysocki  */
756e39473d0SRafael J. Wysocki int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
757e39473d0SRafael J. Wysocki {
758e39473d0SRafael J. Wysocki 	s32 value;
759e39473d0SRafael J. Wysocki 	int ret;
760e39473d0SRafael J. Wysocki 
761e39473d0SRafael J. Wysocki 	pm_runtime_get_sync(dev);
762e39473d0SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
763e39473d0SRafael J. Wysocki 
76437530f2bSRafael J. Wysocki 	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
765b81ea1b5SRafael J. Wysocki 		ret = -EINVAL;
766b81ea1b5SRafael J. Wysocki 		goto out;
767b81ea1b5SRafael J. Wysocki 	}
768b81ea1b5SRafael J. Wysocki 
769e39473d0SRafael J. Wysocki 	value = dev_pm_qos_requested_flags(dev);
770e39473d0SRafael J. Wysocki 	if (set)
771e39473d0SRafael J. Wysocki 		value |= mask;
772e39473d0SRafael J. Wysocki 	else
773e39473d0SRafael J. Wysocki 		value &= ~mask;
774e39473d0SRafael J. Wysocki 
775e39473d0SRafael J. Wysocki 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
776e39473d0SRafael J. Wysocki 
777b81ea1b5SRafael J. Wysocki  out:
778e39473d0SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
779e39473d0SRafael J. Wysocki 	pm_runtime_put(dev);
780e39473d0SRafael J. Wysocki 	return ret;
781e39473d0SRafael J. Wysocki }
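/*
 * Usage sketch: kernel code can adjust the user space flags request, e.g. to
 * make sure power is not removed from the device while it is in use:
 *
 *    ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 */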
7822d984ad1SRafael J. Wysocki 
7832d984ad1SRafael J. Wysocki /**
7842d984ad1SRafael J. Wysocki  * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
7852d984ad1SRafael J. Wysocki  * @dev: Device to obtain the user space latency tolerance for.
7862d984ad1SRafael J. Wysocki  */
7872d984ad1SRafael J. Wysocki s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
7882d984ad1SRafael J. Wysocki {
7892d984ad1SRafael J. Wysocki 	s32 ret;
7902d984ad1SRafael J. Wysocki 
7912d984ad1SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
7922d984ad1SRafael J. Wysocki 	ret = IS_ERR_OR_NULL(dev->power.qos)
7932d984ad1SRafael J. Wysocki 		|| !dev->power.qos->latency_tolerance_req ?
7942d984ad1SRafael J. Wysocki 			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
7952d984ad1SRafael J. Wysocki 			dev->power.qos->latency_tolerance_req->data.pnode.prio;
7962d984ad1SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
7972d984ad1SRafael J. Wysocki 	return ret;
7982d984ad1SRafael J. Wysocki }
7992d984ad1SRafael J. Wysocki 
8002d984ad1SRafael J. Wysocki /**
8012d984ad1SRafael J. Wysocki  * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
8022d984ad1SRafael J. Wysocki  * @dev: Device to update the user space latency tolerance for.
8032d984ad1SRafael J. Wysocki  * @val: New user space latency tolerance for @dev (negative values disable).
8042d984ad1SRafael J. Wysocki  */
8052d984ad1SRafael J. Wysocki int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
8062d984ad1SRafael J. Wysocki {
8072d984ad1SRafael J. Wysocki 	int ret;
8082d984ad1SRafael J. Wysocki 
8092d984ad1SRafael J. Wysocki 	mutex_lock(&dev_pm_qos_mtx);
8102d984ad1SRafael J. Wysocki 
8112d984ad1SRafael J. Wysocki 	if (IS_ERR_OR_NULL(dev->power.qos)
8122d984ad1SRafael J. Wysocki 	    || !dev->power.qos->latency_tolerance_req) {
8132d984ad1SRafael J. Wysocki 		struct dev_pm_qos_request *req;
8142d984ad1SRafael J. Wysocki 
8152d984ad1SRafael J. Wysocki 		if (val < 0) {
81680a6f7c7SAndrew Lutomirski 			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
81780a6f7c7SAndrew Lutomirski 				ret = 0;
81880a6f7c7SAndrew Lutomirski 			else
8192d984ad1SRafael J. Wysocki 				ret = -EINVAL;
8202d984ad1SRafael J. Wysocki 			goto out;
8212d984ad1SRafael J. Wysocki 		}
8222d984ad1SRafael J. Wysocki 		req = kzalloc(sizeof(*req), GFP_KERNEL);
8232d984ad1SRafael J. Wysocki 		if (!req) {
8242d984ad1SRafael J. Wysocki 			ret = -ENOMEM;
8252d984ad1SRafael J. Wysocki 			goto out;
8262d984ad1SRafael J. Wysocki 		}
8272d984ad1SRafael J. Wysocki 		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
8282d984ad1SRafael J. Wysocki 		if (ret < 0) {
8292d984ad1SRafael J. Wysocki 			kfree(req);
8302d984ad1SRafael J. Wysocki 			goto out;
8312d984ad1SRafael J. Wysocki 		}
8322d984ad1SRafael J. Wysocki 		dev->power.qos->latency_tolerance_req = req;
8332d984ad1SRafael J. Wysocki 	} else {
8342d984ad1SRafael J. Wysocki 		if (val < 0) {
8352d984ad1SRafael J. Wysocki 			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
8362d984ad1SRafael J. Wysocki 			ret = 0;
8372d984ad1SRafael J. Wysocki 		} else {
8382d984ad1SRafael J. Wysocki 			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
8392d984ad1SRafael J. Wysocki 		}
8402d984ad1SRafael J. Wysocki 	}
8412d984ad1SRafael J. Wysocki 
8422d984ad1SRafael J. Wysocki  out:
8432d984ad1SRafael J. Wysocki 	mutex_unlock(&dev_pm_qos_mtx);
8442d984ad1SRafael J. Wysocki 	return ret;
8452d984ad1SRafael J. Wysocki }
846034e7906SAndrew Lutomirski EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
84713b2c4a0SMika Westerberg 
84813b2c4a0SMika Westerberg /**
84913b2c4a0SMika Westerberg  * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
85013b2c4a0SMika Westerberg  * @dev: Device whose latency tolerance to expose
85113b2c4a0SMika Westerberg  */
85213b2c4a0SMika Westerberg int dev_pm_qos_expose_latency_tolerance(struct device *dev)
85313b2c4a0SMika Westerberg {
85413b2c4a0SMika Westerberg 	int ret;
85513b2c4a0SMika Westerberg 
85613b2c4a0SMika Westerberg 	if (!dev->power.set_latency_tolerance)
85713b2c4a0SMika Westerberg 		return -EINVAL;
85813b2c4a0SMika Westerberg 
85913b2c4a0SMika Westerberg 	mutex_lock(&dev_pm_qos_sysfs_mtx);
86013b2c4a0SMika Westerberg 	ret = pm_qos_sysfs_add_latency_tolerance(dev);
86113b2c4a0SMika Westerberg 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
86213b2c4a0SMika Westerberg 
86313b2c4a0SMika Westerberg 	return ret;
86413b2c4a0SMika Westerberg }
86513b2c4a0SMika Westerberg EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
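/*
 * Usage sketch: a driver whose device has a set_latency_tolerance() hook
 * (set up by its bus type or by the driver itself) can expose the tolerance
 * knob from its probe path:
 *
 *    ret = dev_pm_qos_expose_latency_tolerance(dev);
 */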
86613b2c4a0SMika Westerberg 
86713b2c4a0SMika Westerberg /**
86813b2c4a0SMika Westerberg  * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
86913b2c4a0SMika Westerberg  * @dev: Device whose latency tolerance to hide
87013b2c4a0SMika Westerberg  */
87113b2c4a0SMika Westerberg void dev_pm_qos_hide_latency_tolerance(struct device *dev)
87213b2c4a0SMika Westerberg {
87313b2c4a0SMika Westerberg 	mutex_lock(&dev_pm_qos_sysfs_mtx);
87413b2c4a0SMika Westerberg 	pm_qos_sysfs_remove_latency_tolerance(dev);
87513b2c4a0SMika Westerberg 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
87613b2c4a0SMika Westerberg 
87713b2c4a0SMika Westerberg 	/* Remove the request from user space now */
87813b2c4a0SMika Westerberg 	pm_runtime_get_sync(dev);
87913b2c4a0SMika Westerberg 	dev_pm_qos_update_user_latency_tolerance(dev,
88013b2c4a0SMika Westerberg 		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
88113b2c4a0SMika Westerberg 	pm_runtime_put(dev);
88213b2c4a0SMika Westerberg }
88313b2c4a0SMika Westerberg EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
884