// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of QoS value: get notified when the target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Notes about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */
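
/*
 * Illustrative sketch (not part of this file): a hypothetical driver that
 * bounds its device's resume latency for the duration of an I/O burst.  The
 * request handle must stay allocated for as long as the request is active.
 *
 *	static struct dev_pm_qos_request io_req;
 *
 *	static int start_io_burst(struct device *dev)
 *	{
 *		return dev_pm_qos_add_request(dev, &io_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 *	}
 *
 *	static void end_io_burst(void)
 *	{
 *		dev_pm_qos_remove_request(&io_req);
 *	}
 */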

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
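
/*
 * Illustrative sketch (not part of this file): a hypothetical bus driver
 * asking whether every active request wants the device kept powered while
 * unused; keep_device_powered() is a made-up helper.
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			== PM_QOS_FLAGS_ALL)
 *		keep_device_powered(dev);
 */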

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply.
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and, if needed, call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts a new QoS request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the QoS request
 *
 * This function inserts a new entry in the device constraints list of
 * requested QoS performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate the data structures, or -ENODEV if the device has just been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
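
/*
 * Illustrative sketch (not part of this file): adding a 200 us resume
 * latency request and telling "aggregate changed" (1) apart from "no
 * change" (0); stream_req is a hypothetical static handle.
 *
 *	ret = dev_pm_qos_add_request(dev, &stream_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 200);
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0)
 *		dev_dbg(dev, "aggregate resume latency changed\n");
 */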

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing QoS request
 * @req: handle to the list element holding a dev_pm_qos request to use
 * @new_value: defines the QoS request
 *
 * Updates an existing dev PM QoS request along with the target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, or -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
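
/*
 * Illustrative sketch (not part of this file): tightening the hypothetical
 * stream_req request from above to 50 us on a hot path; only the handle and
 * the new value are needed.
 *
 *	ret = dev_pm_qos_update_request(&stream_req, 50);
 */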

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing QoS request
 * @req: handle to the request list element
 *
 * Will remove the PM QoS request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, or -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
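
/*
 * Illustrative sketch (not part of this file): dropping the hypothetical
 * stream_req request; the core zeroes the handle, so it may be reused in a
 * later dev_pm_qos_add_request() call.
 *
 *	dev_pm_qos_remove_request(&stream_req);
 */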

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or an error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
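
/*
 * Illustrative sketch (not part of this file): getting notified when the
 * aggregate resume latency target of a device changes.  my_qos_notify() is
 * a made-up callback; the chain is assumed to pass it the new target value.
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &my_qos_nb);
 */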

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
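
/*
 * Illustrative sketch (not part of this file): a hypothetical I2C input
 * driver placing a 15 us latency tolerance request on the first ancestor
 * that implements set_latency_tolerance() instead of on the device itself.
 *
 *	static struct dev_pm_qos_request anc_req;
 *
 *	ret = dev_pm_qos_add_ancestor_request(&client->dev, &anc_req,
 *					      DEV_PM_QOS_LATENCY_TOLERANCE,
 *					      15);
 */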

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
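
/*
 * Once exposed, the limit is reachable from user space via the attribute
 * that pm_qos_sysfs_add_resume_latency() creates under the device's power
 * directory (illustrative shell sketch):
 *
 *	# cat /sys/devices/.../power/pm_qos_resume_latency_us
 *	# echo 300 > /sys/devices/.../power/pm_qos_resume_latency_us
 */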

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
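
/*
 * Once exposed, the flags can be driven from user space (illustrative shell
 * sketch, assuming pm_qos_no_power_off is the sysfs name backing
 * PM_QOS_FLAG_NO_POWER_OFF):
 *
 *	# echo 1 > /sys/devices/.../power/pm_qos_no_power_off
 */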

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
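
/*
 * This is what the pm_qos_latency_tolerance_us sysfs attribute ends up
 * calling (illustrative shell sketch): a non-negative number sets the
 * tolerance, while "auto" maps to PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT
 * and drops the user space request.
 *
 *	# echo 100 > /sys/devices/.../power/pm_qos_latency_tolerance_us
 *	# echo auto > /sys/devices/.../power/pm_qos_latency_tolerance_us
 */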

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now. */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
883