xref: /openbmc/linux/drivers/base/power/qos.c (revision c819e2cf)
1 /*
2  * Devices PM QoS constraints management
3  *
4  * Copyright (C) 2011 Texas Instruments, Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *
11  * This module exposes the interface to kernel space for specifying
12  * per-device PM QoS dependencies. It provides infrastructure for registration
13  * of:
14  *
15  * Dependents on a QoS value : register requests
16  * Watchers of QoS value : get notified when target QoS value changes
17  *
18  * This QoS design is best effort based. Dependents register their QoS needs.
19  * Watchers register to keep track of the current QoS needs of the system.
20  * Watchers can register different types of notification callbacks:
21  *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
22  *    The notification chain data is stored in the per-device constraint
23  *    data struct.
24  *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
25  *    API. The notification chain data is stored in a static variable.
26  *
27  * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
29  *    dev_pm_info.
30  * . To minimize the data usage by the per-device constraints, the data struct
31  *   is only allocated at the first call to dev_pm_qos_add_request.
32  * . The data is later free'd when the device is removed from the system.
33  *  . A global mutex protects the constraints users from the data being
34  *     allocated and free'd.
35  */
36 
37 #include <linux/pm_qos.h>
38 #include <linux/spinlock.h>
39 #include <linux/slab.h>
40 #include <linux/device.h>
41 #include <linux/mutex.h>
42 #include <linux/export.h>
43 #include <linux/pm_runtime.h>
44 #include <linux/err.h>
45 #include <trace/events/power.h>
46 
47 #include "power.h"
48 
/* Serializes all additions/updates/removals on per-device constraint lists. */
static DEFINE_MUTEX(dev_pm_qos_mtx);
/* Outer lock: serializes exposing/hiding the PM QoS sysfs attributes. */
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/* System-wide chain notified when any device's resume latency target changes. */
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
53 
54 /**
55  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
56  * @dev: Device to check the PM QoS flags for.
57  * @mask: Flags to check against.
58  *
59  * This routine must be called with dev->power.lock held.
60  */
61 enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
62 {
63 	struct dev_pm_qos *qos = dev->power.qos;
64 	struct pm_qos_flags *pqf;
65 	s32 val;
66 
67 	if (IS_ERR_OR_NULL(qos))
68 		return PM_QOS_FLAGS_UNDEFINED;
69 
70 	pqf = &qos->flags;
71 	if (list_empty(&pqf->list))
72 		return PM_QOS_FLAGS_UNDEFINED;
73 
74 	val = pqf->effective_flags & mask;
75 	if (val)
76 		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
77 
78 	return PM_QOS_FLAGS_NONE;
79 }
80 
81 /**
82  * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
83  * @dev: Device to check the PM QoS flags for.
84  * @mask: Flags to check against.
85  */
86 enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
87 {
88 	unsigned long irqflags;
89 	enum pm_qos_flags_status ret;
90 
91 	spin_lock_irqsave(&dev->power.lock, irqflags);
92 	ret = __dev_pm_qos_flags(dev, mask);
93 	spin_unlock_irqrestore(&dev->power.lock, irqflags);
94 
95 	return ret;
96 }
97 EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
98 
99 /**
100  * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
101  * @dev: Device to get the PM QoS constraint value for.
102  *
103  * This routine must be called with dev->power.lock held.
104  */
105 s32 __dev_pm_qos_read_value(struct device *dev)
106 {
107 	return IS_ERR_OR_NULL(dev->power.qos) ?
108 		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
109 }
110 
111 /**
112  * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
113  * @dev: Device to get the PM QoS constraint value for.
114  */
115 s32 dev_pm_qos_read_value(struct device *dev)
116 {
117 	unsigned long flags;
118 	s32 ret;
119 
120 	spin_lock_irqsave(&dev->power.lock, flags);
121 	ret = __dev_pm_qos_read_value(dev);
122 	spin_unlock_irqrestore(&dev->power.lock, flags);
123 
124 	return ret;
125 }
126 
/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device and the global notification
 * callbacks
 *
 * Must be called with dev_pm_qos_mtx held.  Returns the pm_qos core result
 * (1 if the aggregate target changed, 0 if not) or -EINVAL for an unknown
 * request type.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		if (ret) {
			/*
			 * The aggregate target changed: re-read the effective
			 * value and inform the global watchers.
			 */
			value = pm_qos_read_value(&qos->resume_latency);
			blocking_notifier_call_chain(&dev_pm_notifiers,
						     (unsigned long)value,
						     req);
		}
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			/*
			 * Push the new tolerance down to the platform handler;
			 * requests of this type are only accepted when the
			 * callback exists (see dev_pm_qos_invalid_request()).
			 */
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
172 
/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 *
 * Returns 0 on success or -ENOMEM if either allocation fails.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	/*
	 * Notifier head for per-device resume-latency watchers; freed in
	 * dev_pm_qos_constraints_destroy().
	 */
	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	/* latency_tolerance has no notifier chain; its ->notifiers stays NULL. */
	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	/* Publish the fully initialized object under the device's power lock. */
	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}
220 
221 static void __dev_pm_qos_hide_latency_limit(struct device *dev);
222 static void __dev_pm_qos_hide_flags(struct device *dev);
223 
224 /**
225  * dev_pm_qos_constraints_destroy
226  * @dev: target device
227  *
228  * Called from the device PM subsystem on device removal under device_pm_lock().
229  */
230 void dev_pm_qos_constraints_destroy(struct device *dev)
231 {
232 	struct dev_pm_qos *qos;
233 	struct dev_pm_qos_request *req, *tmp;
234 	struct pm_qos_constraints *c;
235 	struct pm_qos_flags *f;
236 
237 	mutex_lock(&dev_pm_qos_sysfs_mtx);
238 
239 	/*
240 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
241 	 * exposed to user space, they have to be hidden at this point.
242 	 */
243 	pm_qos_sysfs_remove_resume_latency(dev);
244 	pm_qos_sysfs_remove_flags(dev);
245 
246 	mutex_lock(&dev_pm_qos_mtx);
247 
248 	__dev_pm_qos_hide_latency_limit(dev);
249 	__dev_pm_qos_hide_flags(dev);
250 
251 	qos = dev->power.qos;
252 	if (!qos)
253 		goto out;
254 
255 	/* Flush the constraints lists for the device. */
256 	c = &qos->resume_latency;
257 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
258 		/*
259 		 * Update constraints list and call the notification
260 		 * callbacks if needed
261 		 */
262 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
263 		memset(req, 0, sizeof(*req));
264 	}
265 	c = &qos->latency_tolerance;
266 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
267 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
268 		memset(req, 0, sizeof(*req));
269 	}
270 	f = &qos->flags;
271 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
272 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
273 		memset(req, 0, sizeof(*req));
274 	}
275 
276 	spin_lock_irq(&dev->power.lock);
277 	dev->power.qos = ERR_PTR(-ENODEV);
278 	spin_unlock_irq(&dev->power.lock);
279 
280 	kfree(c->notifiers);
281 	kfree(qos);
282 
283  out:
284 	mutex_unlock(&dev_pm_qos_mtx);
285 
286 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
287 }
288 
289 static bool dev_pm_qos_invalid_request(struct device *dev,
290 				       struct dev_pm_qos_request *req)
291 {
292 	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
293 			&& !dev->power.set_latency_tolerance);
294 }
295 
/*
 * __dev_pm_qos_add_request - Add a request (dev_pm_qos_mtx must be held).
 * @dev: target device for the constraint
 * @req: preallocated, currently inactive request handle
 * @type: type of the request
 * @value: value to assign to the request
 *
 * Allocates the per-device constraints object on first use, then inserts the
 * request.  Returns 1 if the aggregate constraint changed, 0 if it did not,
 * or a negative error code (-EINVAL, -ENOMEM, -ENODEV).
 */
static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || dev_pm_qos_invalid_request(dev, req))
		return -EINVAL;

	/* A handle may only be added once until it is removed again. */
	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	/* An ERR_PTR here means the device is being removed from the system. */
	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}
322 
323 /**
324  * dev_pm_qos_add_request - inserts new qos request into the list
325  * @dev: target device for the constraint
326  * @req: pointer to a preallocated handle
327  * @type: type of the request
328  * @value: defines the qos request
329  *
330  * This function inserts a new entry in the device constraints list of
331  * requested qos performance characteristics. It recomputes the aggregate
332  * QoS expectations of parameters and initializes the dev_pm_qos_request
333  * handle.  Caller needs to save this handle for later use in updates and
334  * removal.
335  *
336  * Returns 1 if the aggregated constraint value has changed,
337  * 0 if the aggregated constraint value has not changed,
338  * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
339  * to allocate for data structures, -ENODEV if the device has just been removed
340  * from the system.
341  *
342  * Callers should ensure that the target device is not RPM_SUSPENDED before
343  * using this function for requests of type DEV_PM_QOS_FLAGS.
344  */
345 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
346 			   enum dev_pm_qos_req_type type, s32 value)
347 {
348 	int ret;
349 
350 	mutex_lock(&dev_pm_qos_mtx);
351 	ret = __dev_pm_qos_add_request(dev, req, type, value);
352 	mutex_unlock(&dev_pm_qos_mtx);
353 	return ret;
354 }
355 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
356 
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 *
 * Must be called with dev_pm_qos_mtx held.  Returns 1 if the aggregate
 * constraint changed, 0 if it did not (including when @new_value equals the
 * currently requested value), -EINVAL for bad arguments or -ENODEV if the
 * device's constraints object is gone.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /*guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	/* The constraints object may already be gone (device removed). */
	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	/* Fetch the currently requested value to skip no-op updates below. */
	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}
397 
398 /**
399  * dev_pm_qos_update_request - modifies an existing qos request
400  * @req : handle to list element holding a dev_pm_qos request to use
401  * @new_value: defines the qos request
402  *
403  * Updates an existing dev PM qos request along with updating the
404  * target value.
405  *
406  * Attempts are made to make this code callable on hot code paths.
407  *
408  * Returns 1 if the aggregated constraint value has changed,
409  * 0 if the aggregated constraint value has not changed,
410  * -EINVAL in case of wrong parameters, -ENODEV if the device has been
411  * removed from the system
412  *
413  * Callers should ensure that the target device is not RPM_SUSPENDED before
414  * using this function for requests of type DEV_PM_QOS_FLAGS.
415  */
416 int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
417 {
418 	int ret;
419 
420 	mutex_lock(&dev_pm_qos_mtx);
421 	ret = __dev_pm_qos_update_request(req, new_value);
422 	mutex_unlock(&dev_pm_qos_mtx);
423 	return ret;
424 }
425 EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
426 
427 static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
428 {
429 	int ret;
430 
431 	if (!req) /*guard against callers passing in null */
432 		return -EINVAL;
433 
434 	if (WARN(!dev_pm_qos_request_active(req),
435 		 "%s() called for unknown object\n", __func__))
436 		return -EINVAL;
437 
438 	if (IS_ERR_OR_NULL(req->dev->power.qos))
439 		return -ENODEV;
440 
441 	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
442 					PM_QOS_DEFAULT_VALUE);
443 	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
444 	memset(req, 0, sizeof(*req));
445 	return ret;
446 }
447 
448 /**
449  * dev_pm_qos_remove_request - modifies an existing qos request
450  * @req: handle to request list element
451  *
452  * Will remove pm qos request from the list of constraints and
453  * recompute the current target value. Call this on slow code paths.
454  *
455  * Returns 1 if the aggregated constraint value has changed,
456  * 0 if the aggregated constraint value has not changed,
457  * -EINVAL in case of wrong parameters, -ENODEV if the device has been
458  * removed from the system
459  *
460  * Callers should ensure that the target device is not RPM_SUSPENDED before
461  * using this function for requests of type DEV_PM_QOS_FLAGS.
462  */
463 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
464 {
465 	int ret;
466 
467 	mutex_lock(&dev_pm_qos_mtx);
468 	ret = __dev_pm_qos_remove_request(req);
469 	mutex_unlock(&dev_pm_qos_mtx);
470 	return ret;
471 }
472 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
473 
/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* An ERR_PTR here means the device is being removed from the system. */
	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	/* Only resume latency changes have a per-device notifier chain. */
	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
506 
507 /**
508  * dev_pm_qos_remove_notifier - deletes notification for changes to target value
509  * of per-device PM QoS constraints
510  *
511  * @dev: target device for the constraint
512  * @notifier: notifier block to be removed.
513  *
514  * Will remove the notifier from the notification chain that gets called
515  * upon changes to the target value.
516  */
517 int dev_pm_qos_remove_notifier(struct device *dev,
518 			       struct notifier_block *notifier)
519 {
520 	int retval = 0;
521 
522 	mutex_lock(&dev_pm_qos_mtx);
523 
524 	/* Silently return if the constraints object is not present. */
525 	if (!IS_ERR_OR_NULL(dev->power.qos))
526 		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
527 							    notifier);
528 
529 	mutex_unlock(&dev_pm_qos_mtx);
530 	return retval;
531 }
532 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
533 
534 /**
535  * dev_pm_qos_add_global_notifier - sets notification entry for changes to
536  * target value of the PM QoS constraints for any device
537  *
538  * @notifier: notifier block managed by caller.
539  *
540  * Will register the notifier into a notification chain that gets called
541  * upon changes to the target value for any device.
542  */
543 int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
544 {
545 	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
546 }
547 EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
548 
549 /**
550  * dev_pm_qos_remove_global_notifier - deletes notification for changes to
551  * target value of PM QoS constraints for any device
552  *
553  * @notifier: notifier block to be removed.
554  *
555  * Will remove the notifier from the notification chain that gets called
556  * upon changes to the target value for any device.
557  */
558 int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
559 {
560 	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
561 }
562 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
563 
564 /**
565  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
566  * @dev: Device whose ancestor to add the request for.
567  * @req: Pointer to the preallocated handle.
568  * @type: Type of the request.
569  * @value: Constraint latency value.
570  */
571 int dev_pm_qos_add_ancestor_request(struct device *dev,
572 				    struct dev_pm_qos_request *req,
573 				    enum dev_pm_qos_req_type type, s32 value)
574 {
575 	struct device *ancestor = dev->parent;
576 	int ret = -ENODEV;
577 
578 	switch (type) {
579 	case DEV_PM_QOS_RESUME_LATENCY:
580 		while (ancestor && !ancestor->power.ignore_children)
581 			ancestor = ancestor->parent;
582 
583 		break;
584 	case DEV_PM_QOS_LATENCY_TOLERANCE:
585 		while (ancestor && !ancestor->power.set_latency_tolerance)
586 			ancestor = ancestor->parent;
587 
588 		break;
589 	default:
590 		ancestor = NULL;
591 	}
592 	if (ancestor)
593 		ret = dev_pm_qos_add_request(ancestor, req, type, value);
594 
595 	if (ret < 0)
596 		req->dev = NULL;
597 
598 	return ret;
599 }
600 EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
601 
/*
 * __dev_pm_qos_drop_user_request - Remove and free a user-space-owned request.
 * @dev: target device (its constraints object must exist)
 * @type: which of the user-space request slots to drop
 *
 * Must be called with dev_pm_qos_mtx held.  Detaches the request pointer for
 * @type from the constraints object, removes the request and frees it.  If
 * the slot is already empty, __dev_pm_qos_remove_request(NULL) returns
 * -EINVAL and kfree(NULL) is a no-op, so this is safe.
 */
static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch(type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}
624 
/*
 * dev_pm_qos_drop_user_request - Locked wrapper around
 * __dev_pm_qos_drop_user_request().
 */
static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}
632 
/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 *
 * Allocates a user-space-owned resume latency request, adds it, and creates
 * the corresponding sysfs attribute.  Returns 0 (or a positive value from the
 * add path) on success, -EINVAL/-ENOMEM/-ENODEV/-EEXIST on failure.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	/* Lock order: sysfs mutex first, then the constraints mutex. */
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	/* The constraints object may have gone away or already be exposed. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	/* Roll back the request if the sysfs attribute cannot be created. */
	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
684 
685 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
686 {
687 	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
688 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
689 }
690 
/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 *
 * Removes the sysfs attribute and drops the user-space-owned request.
 * Lock order mirrors dev_pm_qos_expose_latency_limit(): sysfs mutex outside,
 * constraints mutex inside.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
708 
/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 *
 * Allocates a user-space-owned flags request, adds it, and creates the
 * corresponding sysfs attribute.  Returns 0 (or a positive value from the add
 * path) on success, -EINVAL/-ENOMEM/-ENODEV/-EEXIST on failure.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	/* DEV_PM_QOS_FLAGS requests must not be handled while RPM_SUSPENDED. */
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	/* The constraints object may have gone away or already be exposed. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	/* Roll back the request if the sysfs attribute cannot be created. */
	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
762 
763 static void __dev_pm_qos_hide_flags(struct device *dev)
764 {
765 	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
766 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
767 }
768 
/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 *
 * Removes the sysfs attribute and drops the user-space-owned flags request.
 * The device is resumed around the operation because removing a
 * DEV_PM_QOS_FLAGS request must not happen while the device is RPM_SUSPENDED.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
788 
/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 *
 * Returns the __dev_pm_qos_update_request() result, or -EINVAL if the flags
 * have not been exposed to user space for @dev.
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	/* Keep the device active: flags changes must not race with suspend. */
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	/* Apply @mask on top of the currently requested flags. */
	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
821 
822 /**
823  * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
824  * @dev: Device to obtain the user space latency tolerance for.
825  */
826 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
827 {
828 	s32 ret;
829 
830 	mutex_lock(&dev_pm_qos_mtx);
831 	ret = IS_ERR_OR_NULL(dev->power.qos)
832 		|| !dev->power.qos->latency_tolerance_req ?
833 			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
834 			dev->power.qos->latency_tolerance_req->data.pnode.prio;
835 	mutex_unlock(&dev_pm_qos_mtx);
836 	return ret;
837 }
838 
/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 *
 * Creates the user-space latency tolerance request on first use, updates it
 * on subsequent calls, and drops it when @val is negative.  Returns 0 or a
 * positive value on success, or a negative error code.
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		/* No request exists yet; a negative value has nothing to drop. */
		if (val < 0) {
			ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		/* The add above creates dev->power.qos if it did not exist. */
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			/* Negative value: remove and free the request. */
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
882