// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of a QoS value: get notified when the target QoS value changes
 *
 * This QoS design is best-effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Notes about the per-device constraint data struct allocation:
 * . The per-device constraints data struct pointer is stored in the device's
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */
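
/*
 * Illustrative sketch of the two roles above (the "foo" names are
 * hypothetical, not part of this file): a dependent embeds a
 * struct dev_pm_qos_request and registers its need, while a watcher
 * registers a notifier block for the same device:
 *
 *	dev_pm_qos_add_request(dev, &foo->resume_lat_req,
 *			       DEV_PM_QOS_RESUME_LATENCY, 100);
 *	dev_pm_qos_add_notifier(dev, &foo_notifier,
 *				DEV_PM_QOS_RESUME_LATENCY);
 *	...
 *	dev_pm_qos_remove_request(&foo->resume_lat_req);
 */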

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
        struct dev_pm_qos *qos = dev->power.qos;
        struct pm_qos_flags *pqf;
        s32 val;

        lockdep_assert_held(&dev->power.lock);

        if (IS_ERR_OR_NULL(qos))
                return PM_QOS_FLAGS_UNDEFINED;

        pqf = &qos->flags;
        if (list_empty(&pqf->list))
                return PM_QOS_FLAGS_UNDEFINED;

        val = pqf->effective_flags & mask;
        if (val)
                return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

        return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
        unsigned long irqflags;
        enum pm_qos_flags_status ret;

        spin_lock_irqsave(&dev->power.lock, irqflags);
        ret = __dev_pm_qos_flags(dev, mask);
        spin_unlock_irqrestore(&dev->power.lock, irqflags);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);

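/*
 * Example (illustrative): bus code deciding whether it may power off a device
 * could check the aggregated flags like this (PM_QOS_FLAG_NO_POWER_OFF is
 * assumed to come from linux/pm_qos.h):
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) != PM_QOS_FLAGS_NONE)
 *		return -EBUSY;	// at least one request forbids power-off
 */
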
/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
        lockdep_assert_held(&dev->power.lock);

        return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
        struct dev_pm_qos *qos = dev->power.qos;
        unsigned long flags;
        s32 ret;

        spin_lock_irqsave(&dev->power.lock, flags);

        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
                        : pm_qos_read_value(&qos->resume_latency);
                break;
        case DEV_PM_QOS_MIN_FREQUENCY:
                ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
                        : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
                break;
        case DEV_PM_QOS_MAX_FREQUENCY:
                ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
                        : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
                break;
        default:
                WARN_ON(1);
                ret = 0;
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);

        return ret;
}

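/*
 * Example (illustrative): runtime PM style code can read the aggregated
 * constraint without holding dev->power.lock; "worst_case_us" below is a
 * hypothetical per-driver estimate, not something defined here:
 *
 *	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
 *
 *	if (limit != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT && limit < worst_case_us)
 *		// resuming this device could exceed the allowed latency
 */
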
/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
                            enum pm_qos_req_action action, s32 value)
{
        struct dev_pm_qos *qos = req->dev->power.qos;
        int ret;

        switch (req->type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
                        value = 0;

                ret = pm_qos_update_target(&qos->resume_latency,
                                           &req->data.pnode, action, value);
                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                ret = pm_qos_update_target(&qos->latency_tolerance,
                                           &req->data.pnode, action, value);
                if (ret) {
                        value = pm_qos_read_value(&qos->latency_tolerance);
                        req->dev->power.set_latency_tolerance(req->dev, value);
                }
                break;
        case DEV_PM_QOS_MIN_FREQUENCY:
        case DEV_PM_QOS_MAX_FREQUENCY:
                ret = freq_qos_apply(&req->data.freq, action, value);
                break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct pm_qos_constraints *c;
        struct blocking_notifier_head *n;

        qos = kzalloc(sizeof(*qos), GFP_KERNEL);
        if (!qos)
                return -ENOMEM;

        n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
        if (!n) {
                kfree(qos);
                return -ENOMEM;
        }

        c = &qos->resume_latency;
        plist_head_init(&c->list);
        c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
        c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
        c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;
        c->notifiers = n;
        BLOCKING_INIT_NOTIFIER_HEAD(n);

        c = &qos->latency_tolerance;
        plist_head_init(&c->list);
        c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
        c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
        c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;

        freq_constraints_init(&qos->freq);

        INIT_LIST_HEAD(&qos->flags.list);

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = qos;
        spin_unlock_irq(&dev->power.lock);

        return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct dev_pm_qos_request *req, *tmp;
        struct pm_qos_constraints *c;
        struct pm_qos_flags *f;

        mutex_lock(&dev_pm_qos_sysfs_mtx);

        /*
         * If the device's PM QoS resume latency limit or PM QoS flags have been
         * exposed to user space, they have to be hidden at this point.
         */
        pm_qos_sysfs_remove_resume_latency(dev);
        pm_qos_sysfs_remove_flags(dev);

        mutex_lock(&dev_pm_qos_mtx);

        __dev_pm_qos_hide_latency_limit(dev);
        __dev_pm_qos_hide_flags(dev);

        qos = dev->power.qos;
        if (!qos)
                goto out;

        /* Flush the constraints lists for the device. */
        c = &qos->resume_latency;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                /*
                 * Update constraints list and call the notification
                 * callbacks if needed
                 */
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        c = &qos->latency_tolerance;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        c = &qos->freq.min_freq;
        plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
                apply_constraint(req, PM_QOS_REMOVE_REQ,
                                 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        c = &qos->freq.max_freq;
        plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
                apply_constraint(req, PM_QOS_REMOVE_REQ,
                                 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = ERR_PTR(-ENODEV);
        spin_unlock_irq(&dev->power.lock);

        kfree(qos->resume_latency.notifiers);
        kfree(qos);

 out:
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
                                        enum dev_pm_qos_req_type type)
{
        return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
               !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
                                    struct dev_pm_qos_request *req,
                                    enum dev_pm_qos_req_type type, s32 value)
{
        int ret = 0;

        if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
                return -EINVAL;

        if (WARN(dev_pm_qos_request_active(req),
                 "%s() called for already added request\n", __func__))
                return -EINVAL;

        if (IS_ERR(dev->power.qos))
                ret = -ENODEV;
        else if (!dev->power.qos)
                ret = dev_pm_qos_constraints_allocate(dev);

        trace_dev_pm_qos_add_request(dev_name(dev), type, value);
        if (ret)
                return ret;

        req->dev = dev;
        req->type = type;
        if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
                ret = freq_qos_add_request(&dev->power.qos->freq,
                                           &req->data.freq,
                                           FREQ_QOS_MIN, value);
        else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
                ret = freq_qos_add_request(&dev->power.qos->freq,
                                           &req->data.freq,
                                           FREQ_QOS_MAX, value);
        else
                ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

        return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_add_request(dev, req, type, value);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);

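/*
 * Typical usage sketch (illustrative only): the request structure must stay
 * allocated for as long as the request is active, so drivers usually embed
 * it in their private data (the "foo" driver below is hypothetical):
 *
 *	struct foo_data {
 *		struct dev_pm_qos_request qos_req;
 *		...
 *	};
 *
 *	ret = dev_pm_qos_add_request(dev, &foo->qos_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 20);
 *	if (ret < 0)
 *		return ret;
 */
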
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                                       s32 new_value)
{
        s32 curr_value;
        int ret = 0;

        if (!req) /* guard against callers passing in null */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (IS_ERR_OR_NULL(req->dev->power.qos))
                return -ENODEV;

        switch (req->type) {
        case DEV_PM_QOS_RESUME_LATENCY:
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_MIN_FREQUENCY:
        case DEV_PM_QOS_MAX_FREQUENCY:
                curr_value = req->data.freq.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
                curr_value = req->data.flr.flags;
                break;
        default:
                return -EINVAL;
        }

        trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
                                        new_value);
        if (curr_value != new_value)
                ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

        return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_update_request(req, new_value);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

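/*
 * Example (illustrative): a driver that registered the hypothetical
 * foo->qos_req above can later tighten or relax its constraint:
 *
 *	dev_pm_qos_update_request(&foo->qos_req, 100);	// relax to 100 us
 *
 * A return value of 1 means the aggregate target changed and notifiers
 * (if any) have been called; 0 means the aggregate was unaffected.
 */
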
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret;

        if (!req) /* guard against callers passing in null */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (IS_ERR_OR_NULL(req->dev->power.qos))
                return -ENODEV;

        trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
                                        PM_QOS_DEFAULT_VALUE);
        ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
        memset(req, 0, sizeof(*req));
        return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_remove_request(req);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

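/*
 * Example (illustrative): when the hypothetical foo driver no longer needs
 * its constraint (e.g. in its remove path), it drops the request; the handle
 * is zeroed and may then be reused by a new dev_pm_qos_add_request() call:
 *
 *	dev_pm_qos_remove_request(&foo->qos_req);
 */
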
/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
                            enum dev_pm_qos_req_type type)
{
        int ret = 0;

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR(dev->power.qos))
                ret = -ENODEV;
        else if (!dev->power.qos)
                ret = dev_pm_qos_constraints_allocate(dev);

        if (ret)
                goto unlock;

        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
                                                       notifier);
                break;
        case DEV_PM_QOS_MIN_FREQUENCY:
                ret = freq_qos_add_notifier(&dev->power.qos->freq,
                                            FREQ_QOS_MIN, notifier);
                break;
        case DEV_PM_QOS_MAX_FREQUENCY:
                ret = freq_qos_add_notifier(&dev->power.qos->freq,
                                            FREQ_QOS_MAX, notifier);
                break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
        }

unlock:
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);

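/*
 * Example (illustrative): a watcher is called through a standard notifier;
 * for DEV_PM_QOS_RESUME_LATENCY the new aggregate value is passed as the
 * "action" argument (the callback and its registration below are a sketch):
 *
 *	static int foo_latency_notify(struct notifier_block *nb,
 *				      unsigned long value, void *unused)
 *	{
 *		// react to the new aggregate resume latency "value"
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_latency_notify,
 *	};
 *
 *	dev_pm_qos_add_notifier(dev, &foo_nb, DEV_PM_QOS_RESUME_LATENCY);
 */
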
/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier,
                               enum dev_pm_qos_req_type type)
{
        int ret = 0;

        mutex_lock(&dev_pm_qos_mtx);

        /* Silently return if the constraints object is not present. */
        if (IS_ERR_OR_NULL(dev->power.qos))
                goto unlock;

        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
                                                         notifier);
                break;
        case DEV_PM_QOS_MIN_FREQUENCY:
                ret = freq_qos_remove_notifier(&dev->power.qos->freq,
                                               FREQ_QOS_MIN, notifier);
                break;
        case DEV_PM_QOS_MAX_FREQUENCY:
                ret = freq_qos_remove_notifier(&dev->power.qos->freq,
                                               FREQ_QOS_MAX, notifier);
                break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
        }

unlock:
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
                                    struct dev_pm_qos_request *req,
                                    enum dev_pm_qos_req_type type, s32 value)
{
        struct device *ancestor = dev->parent;
        int ret = -ENODEV;

        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                while (ancestor && !ancestor->power.ignore_children)
                        ancestor = ancestor->parent;

                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                while (ancestor && !ancestor->power.set_latency_tolerance)
                        ancestor = ancestor->parent;

                break;
        default:
                ancestor = NULL;
        }
        if (ancestor)
                ret = dev_pm_qos_add_request(ancestor, req, type, value);

        if (ret < 0)
                req->dev = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);

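/*
 * Example (illustrative): a child device (say an I2C client) that needs its
 * ancestor to resume quickly can constrain that ancestor instead of itself;
 * "foo_req" is a hypothetical preallocated handle:
 *
 *	ret = dev_pm_qos_add_ancestor_request(&client->dev, &foo_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 *
 * On failure req->dev is cleared, so the handle can still be checked with
 * dev_pm_qos_request_active().
 */
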
static void __dev_pm_qos_drop_user_request(struct device *dev,
                                           enum dev_pm_qos_req_type type)
{
        struct dev_pm_qos_request *req = NULL;

        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                req = dev->power.qos->resume_latency_req;
                dev->power.qos->resume_latency_req = NULL;
                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                req = dev->power.qos->latency_tolerance_req;
                dev->power.qos->latency_tolerance_req = NULL;
                break;
        case DEV_PM_QOS_FLAGS:
                req = dev->power.qos->flags_req;
                dev->power.qos->flags_req = NULL;
                break;
        default:
                WARN_ON(1);
                return;
        }
        __dev_pm_qos_remove_request(req);
        kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
                                         enum dev_pm_qos_req_type type)
{
        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_drop_user_request(dev, type);
        mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev) || value < 0)
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        mutex_lock(&dev_pm_qos_sysfs_mtx);

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
        else if (dev->power.qos->resume_latency_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
        dev->power.qos->resume_latency_req = req;

        mutex_unlock(&dev_pm_qos_mtx);

        ret = pm_qos_sysfs_add_resume_latency(dev);
        if (ret)
                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);

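/*
 * Example (illustrative): bus or class code that wants user space to be able
 * to cap the device's resume latency through sysfs could call:
 *
 *	dev_pm_qos_expose_latency_limit(dev, PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
 *
 * which installs the corresponding per-device latency attribute with no
 * constraint initially; dev_pm_qos_hide_latency_limit() removes it again.
 */
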
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        pm_qos_sysfs_remove_resume_latency(dev);

        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_latency_limit(dev);
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev))
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
        else if (dev->power.qos->flags_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
        dev->power.qos->flags_req = req;

        mutex_unlock(&dev_pm_qos_mtx);

        ret = pm_qos_sysfs_add_flags(dev);
        if (ret)
                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        pm_runtime_put(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        pm_qos_sysfs_remove_flags(dev);

        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_flags(dev);
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
        s32 value;
        int ret;

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
                ret = -EINVAL;
                goto out;
        }

        value = dev_pm_qos_requested_flags(dev);
        if (set)
                value |= mask;
        else
                value &= ~mask;

        ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
        return ret;
}
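
/*
 * Example (illustrative): once the flags request has been exposed with
 * dev_pm_qos_expose_flags(), built-in code (e.g. the PM sysfs interface)
 * can flip individual flags on behalf of user space, for instance to forbid
 * powering the device off:
 *
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 */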

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
        s32 ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = IS_ERR_OR_NULL(dev->power.qos)
                || !dev->power.qos->latency_tolerance_req ?
                        PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
                        dev->power.qos->latency_tolerance_req->data.pnode.prio;
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos)
            || !dev->power.qos->latency_tolerance_req) {
                struct dev_pm_qos_request *req;

                if (val < 0) {
                        if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
                                ret = 0;
                        else
                                ret = -EINVAL;
                        goto out;
                }
                req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
                if (ret < 0) {
                        kfree(req);
                        goto out;
                }
                dev->power.qos->latency_tolerance_req = req;
        } else {
                if (val < 0) {
                        __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
                        ret = 0;
                } else {
                        ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
                }
        }

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

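/*
 * Example (illustrative): callers such as the latency tolerance sysfs store
 * path translate a user-supplied value into a DEV_PM_QOS_LATENCY_TOLERANCE
 * request with this helper; the negative "no constraint" value drops the
 * request again:
 *
 *	dev_pm_qos_update_user_latency_tolerance(dev, 50);
 *	...
 *	dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 */
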
/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
        int ret;

        if (!dev->power.set_latency_tolerance)
                return -EINVAL;

        mutex_lock(&dev_pm_qos_sysfs_mtx);
        ret = pm_qos_sysfs_add_latency_tolerance(dev);
        mutex_unlock(&dev_pm_qos_sysfs_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
        mutex_lock(&dev_pm_qos_sysfs_mtx);
        pm_qos_sysfs_remove_latency_tolerance(dev);
        mutex_unlock(&dev_pm_qos_sysfs_mtx);

        /* Remove the request from user space now */
        pm_runtime_get_sync(dev);
        dev_pm_qos_update_user_latency_tolerance(dev,
                PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
        pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);