xref: /openbmc/linux/drivers/base/power/qos.c (revision 2127c01b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Devices PM QoS constraints management
4  *
5  * Copyright (C) 2011 Texas Instruments, Inc.
6  *
7  * This module exposes the interface to kernel space for specifying
8  * per-device PM QoS dependencies. It provides infrastructure for registration
9  * of:
10  *
11  * Dependents on a QoS value : register requests
12  * Watchers of QoS value : get notified when target QoS value changes
13  *
14  * This QoS design is best effort based. Dependents register their QoS needs.
15  * Watchers register to keep track of the current QoS needs of the system.
16  * Watchers can register a per-device notification callback using the
17  * dev_pm_qos_*_notifier API. The notification chain data is stored in the
18  * per-device constraint data struct.
19  *
20  * Note about the per-device constraint data struct allocation:
21  * . The per-device constraints data struct ptr is stored into the device
22  *    dev_pm_info.
23  * . To minimize the data usage by the per-device constraints, the data struct
24  *   is only allocated at the first call to dev_pm_qos_add_request.
25  * . The data is later free'd when the device is removed from the system.
26  *  . A global mutex protects the constraints users from the data being
27  *     allocated and free'd.
28  */
29 
30 #include <linux/pm_qos.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/device.h>
34 #include <linux/mutex.h>
35 #include <linux/export.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/err.h>
38 #include <trace/events/power.h>
39 
40 #include "power.h"
41 
42 static DEFINE_MUTEX(dev_pm_qos_mtx);
43 static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
44 
45 /**
46  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
47  * @dev: Device to check the PM QoS flags for.
48  * @mask: Flags to check against.
49  *
50  * This routine must be called with dev->power.lock held.
51  */
52 enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
53 {
54 	struct dev_pm_qos *qos = dev->power.qos;
55 	struct pm_qos_flags *pqf;
56 	s32 val;
57 
58 	lockdep_assert_held(&dev->power.lock);
59 
60 	if (IS_ERR_OR_NULL(qos))
61 		return PM_QOS_FLAGS_UNDEFINED;
62 
63 	pqf = &qos->flags;
64 	if (list_empty(&pqf->list))
65 		return PM_QOS_FLAGS_UNDEFINED;
66 
67 	val = pqf->effective_flags & mask;
68 	if (val)
69 		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
70 
71 	return PM_QOS_FLAGS_NONE;
72 }
73 
74 /**
75  * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
76  * @dev: Device to check the PM QoS flags for.
77  * @mask: Flags to check against.
78  */
79 enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
80 {
81 	unsigned long irqflags;
82 	enum pm_qos_flags_status ret;
83 
84 	spin_lock_irqsave(&dev->power.lock, irqflags);
85 	ret = __dev_pm_qos_flags(dev, mask);
86 	spin_unlock_irqrestore(&dev->power.lock, irqflags);
87 
88 	return ret;
89 }
90 EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
91 
/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 *
 * Return: the current resume latency constraint for @dev as reported by
 * dev_pm_qos_raw_resume_latency().
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}
104 
105 /**
106  * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
107  * @dev: Device to get the PM QoS constraint value for.
108  * @type: QoS request type.
109  */
110 s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
111 {
112 	struct dev_pm_qos *qos = dev->power.qos;
113 	unsigned long flags;
114 	s32 ret;
115 
116 	spin_lock_irqsave(&dev->power.lock, flags);
117 
118 	switch (type) {
119 	case DEV_PM_QOS_RESUME_LATENCY:
120 		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
121 			: pm_qos_read_value(&qos->resume_latency);
122 		break;
123 	case DEV_PM_QOS_MIN_FREQUENCY:
124 		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
125 			: pm_qos_read_value(&qos->min_frequency);
126 		break;
127 	case DEV_PM_QOS_MAX_FREQUENCY:
128 		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
129 			: pm_qos_read_value(&qos->max_frequency);
130 		break;
131 	default:
132 		WARN_ON(1);
133 		ret = 0;
134 	}
135 
136 	spin_unlock_irqrestore(&dev->power.lock, flags);
137 
138 	return ret;
139 }
140 
/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 *
 * Return: what pm_qos_update_target()/pm_qos_update_flags() returned for the
 * list update, or -EINVAL for an unknown request type.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		/* Negative latency values are invalid except on removal. */
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			/*
			 * The aggregate tolerance changed, so notify the
			 * device via its set_latency_tolerance() callback
			 * (guaranteed non-NULL for this request type by
			 * dev_pm_qos_invalid_req_type()).
			 */
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = pm_qos_update_target(&qos->min_frequency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = pm_qos_update_target(&qos->max_frequency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
190 
191 /*
192  * dev_pm_qos_constraints_allocate
193  * @dev: device to allocate data for
194  *
195  * Called at the first call to add_request, for constraint data allocation
196  * Must be called with the dev_pm_qos_mtx mutex held
197  */
198 static int dev_pm_qos_constraints_allocate(struct device *dev)
199 {
200 	struct dev_pm_qos *qos;
201 	struct pm_qos_constraints *c;
202 	struct blocking_notifier_head *n;
203 
204 	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
205 	if (!qos)
206 		return -ENOMEM;
207 
208 	n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
209 	if (!n) {
210 		kfree(qos);
211 		return -ENOMEM;
212 	}
213 
214 	c = &qos->resume_latency;
215 	plist_head_init(&c->list);
216 	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
217 	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
218 	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
219 	c->type = PM_QOS_MIN;
220 	c->notifiers = n;
221 	BLOCKING_INIT_NOTIFIER_HEAD(n);
222 
223 	c = &qos->latency_tolerance;
224 	plist_head_init(&c->list);
225 	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
226 	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
227 	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
228 	c->type = PM_QOS_MIN;
229 
230 	c = &qos->min_frequency;
231 	plist_head_init(&c->list);
232 	c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
233 	c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
234 	c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
235 	c->type = PM_QOS_MAX;
236 	c->notifiers = ++n;
237 	BLOCKING_INIT_NOTIFIER_HEAD(n);
238 
239 	c = &qos->max_frequency;
240 	plist_head_init(&c->list);
241 	c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
242 	c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
243 	c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
244 	c->type = PM_QOS_MIN;
245 	c->notifiers = ++n;
246 	BLOCKING_INIT_NOTIFIER_HEAD(n);
247 
248 	INIT_LIST_HEAD(&qos->flags.list);
249 
250 	spin_lock_irq(&dev->power.lock);
251 	dev->power.qos = qos;
252 	spin_unlock_irq(&dev->power.lock);
253 
254 	return 0;
255 }
256 
257 static void __dev_pm_qos_hide_latency_limit(struct device *dev);
258 static void __dev_pm_qos_hide_flags(struct device *dev);
259 
/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 *
 * Removes every outstanding request from all of the device's constraint
 * lists, hides the interfaces exposed to user space (if any), replaces
 * dev->power.qos with ERR_PTR(-ENODEV) so that subsequent requests are
 * rejected, and frees the constraints object.
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	/* Drop the user space requests backing the sysfs attributes, if any. */
	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->min_frequency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->max_frequency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	/*
	 * Make the object unreachable before freeing it and leave an error
	 * pointer behind: new requests will now fail with -ENODEV.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	/*
	 * resume_latency.notifiers points at the base of the single notifier
	 * array allocated in dev_pm_qos_constraints_allocate().
	 */
	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
338 
339 static bool dev_pm_qos_invalid_req_type(struct device *dev,
340 					enum dev_pm_qos_req_type type)
341 {
342 	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
343 	       !dev->power.set_latency_tolerance;
344 }
345 
/*
 * __dev_pm_qos_add_request - Add a device PM QoS request.
 *
 * Validates the arguments, allocates the constraints object on first use and
 * inserts the request.  Must be called under dev_pm_qos_mtx.
 */
static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	/* Re-adding an active request would corrupt the constraint lists. */
	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	/* ERR_PTR means the device has been removed; NULL means first use. */
	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	/* The trace event fires even if the request cannot be added. */
	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}
372 
373 /**
374  * dev_pm_qos_add_request - inserts new qos request into the list
375  * @dev: target device for the constraint
376  * @req: pointer to a preallocated handle
377  * @type: type of the request
378  * @value: defines the qos request
379  *
380  * This function inserts a new entry in the device constraints list of
381  * requested qos performance characteristics. It recomputes the aggregate
382  * QoS expectations of parameters and initializes the dev_pm_qos_request
383  * handle.  Caller needs to save this handle for later use in updates and
384  * removal.
385  *
386  * Returns 1 if the aggregated constraint value has changed,
387  * 0 if the aggregated constraint value has not changed,
388  * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
389  * to allocate for data structures, -ENODEV if the device has just been removed
390  * from the system.
391  *
392  * Callers should ensure that the target device is not RPM_SUSPENDED before
393  * using this function for requests of type DEV_PM_QOS_FLAGS.
394  */
395 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
396 			   enum dev_pm_qos_req_type type, s32 value)
397 {
398 	int ret;
399 
400 	mutex_lock(&dev_pm_qos_mtx);
401 	ret = __dev_pm_qos_add_request(dev, req, type, value);
402 	mutex_unlock(&dev_pm_qos_mtx);
403 	return ret;
404 }
405 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
406 
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 *
 * Must be called under dev_pm_qos_mtx.  Return: result of apply_constraint()
 * when the value actually changes, 0 when it does not, or a negative error
 * code for an invalid/inactive request or a removed device.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /*guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	/* The device's constraints object may already have been destroyed. */
	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	/* Fetch the currently requested value to skip no-op updates below. */
	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	/* The trace event fires even when the value is unchanged. */
	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}
449 
450 /**
451  * dev_pm_qos_update_request - modifies an existing qos request
452  * @req : handle to list element holding a dev_pm_qos request to use
453  * @new_value: defines the qos request
454  *
455  * Updates an existing dev PM qos request along with updating the
456  * target value.
457  *
458  * Attempts are made to make this code callable on hot code paths.
459  *
460  * Returns 1 if the aggregated constraint value has changed,
461  * 0 if the aggregated constraint value has not changed,
462  * -EINVAL in case of wrong parameters, -ENODEV if the device has been
463  * removed from the system
464  *
465  * Callers should ensure that the target device is not RPM_SUSPENDED before
466  * using this function for requests of type DEV_PM_QOS_FLAGS.
467  */
468 int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
469 {
470 	int ret;
471 
472 	mutex_lock(&dev_pm_qos_mtx);
473 	ret = __dev_pm_qos_update_request(req, new_value);
474 	mutex_unlock(&dev_pm_qos_mtx);
475 	return ret;
476 }
477 EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
478 
/*
 * __dev_pm_qos_remove_request - Drop a device PM QoS request.
 *
 * Removes @req from its constraint list and zeroes it so that it is no
 * longer considered active.  Must be called under dev_pm_qos_mtx.
 */
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /*guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	/* The device's constraints object may already have been destroyed. */
	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	/* Wipe the request so it can be reused with dev_pm_qos_add_request(). */
	memset(req, 0, sizeof(*req));
	return ret;
}
499 
500 /**
501  * dev_pm_qos_remove_request - modifies an existing qos request
502  * @req: handle to request list element
503  *
504  * Will remove pm qos request from the list of constraints and
505  * recompute the current target value. Call this on slow code paths.
506  *
507  * Returns 1 if the aggregated constraint value has changed,
508  * 0 if the aggregated constraint value has not changed,
509  * -EINVAL in case of wrong parameters, -ENODEV if the device has been
510  * removed from the system
511  *
512  * Callers should ensure that the target device is not RPM_SUSPENDED before
513  * using this function for requests of type DEV_PM_QOS_FLAGS.
514  */
515 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
516 {
517 	int ret;
518 
519 	mutex_lock(&dev_pm_qos_mtx);
520 	ret = __dev_pm_qos_remove_request(req);
521 	mutex_unlock(&dev_pm_qos_mtx);
522 	return ret;
523 }
524 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
525 
526 /**
527  * dev_pm_qos_add_notifier - sets notification entry for changes to target value
528  * of per-device PM QoS constraints
529  *
530  * @dev: target device for the constraint
531  * @notifier: notifier block managed by caller.
532  * @type: request type.
533  *
534  * Will register the notifier into a notification chain that gets called
535  * upon changes to the target value for the device.
536  *
537  * If the device's constraints object doesn't exist when this routine is called,
538  * it will be created (or error code will be returned if that fails).
539  */
540 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
541 			    enum dev_pm_qos_req_type type)
542 {
543 	int ret = 0;
544 
545 	mutex_lock(&dev_pm_qos_mtx);
546 
547 	if (IS_ERR(dev->power.qos))
548 		ret = -ENODEV;
549 	else if (!dev->power.qos)
550 		ret = dev_pm_qos_constraints_allocate(dev);
551 
552 	if (ret)
553 		goto unlock;
554 
555 	switch (type) {
556 	case DEV_PM_QOS_RESUME_LATENCY:
557 		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
558 						       notifier);
559 		break;
560 	case DEV_PM_QOS_MIN_FREQUENCY:
561 		ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
562 						       notifier);
563 		break;
564 	case DEV_PM_QOS_MAX_FREQUENCY:
565 		ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
566 						       notifier);
567 		break;
568 	default:
569 		WARN_ON(1);
570 		ret = -EINVAL;
571 	}
572 
573 unlock:
574 	mutex_unlock(&dev_pm_qos_mtx);
575 	return ret;
576 }
577 EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
578 
579 /**
580  * dev_pm_qos_remove_notifier - deletes notification for changes to target value
581  * of per-device PM QoS constraints
582  *
583  * @dev: target device for the constraint
584  * @notifier: notifier block to be removed.
585  * @type: request type.
586  *
587  * Will remove the notifier from the notification chain that gets called
588  * upon changes to the target value.
589  */
590 int dev_pm_qos_remove_notifier(struct device *dev,
591 			       struct notifier_block *notifier,
592 			       enum dev_pm_qos_req_type type)
593 {
594 	int ret = 0;
595 
596 	mutex_lock(&dev_pm_qos_mtx);
597 
598 	/* Silently return if the constraints object is not present. */
599 	if (IS_ERR_OR_NULL(dev->power.qos))
600 		goto unlock;
601 
602 	switch (type) {
603 	case DEV_PM_QOS_RESUME_LATENCY:
604 		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
605 							 notifier);
606 		break;
607 	case DEV_PM_QOS_MIN_FREQUENCY:
608 		ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
609 							 notifier);
610 		break;
611 	case DEV_PM_QOS_MAX_FREQUENCY:
612 		ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
613 							 notifier);
614 		break;
615 	default:
616 		WARN_ON(1);
617 		ret = -EINVAL;
618 	}
619 
620 unlock:
621 	mutex_unlock(&dev_pm_qos_mtx);
622 	return ret;
623 }
624 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
625 
626 /**
627  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
628  * @dev: Device whose ancestor to add the request for.
629  * @req: Pointer to the preallocated handle.
630  * @type: Type of the request.
631  * @value: Constraint latency value.
632  */
633 int dev_pm_qos_add_ancestor_request(struct device *dev,
634 				    struct dev_pm_qos_request *req,
635 				    enum dev_pm_qos_req_type type, s32 value)
636 {
637 	struct device *ancestor = dev->parent;
638 	int ret = -ENODEV;
639 
640 	switch (type) {
641 	case DEV_PM_QOS_RESUME_LATENCY:
642 		while (ancestor && !ancestor->power.ignore_children)
643 			ancestor = ancestor->parent;
644 
645 		break;
646 	case DEV_PM_QOS_LATENCY_TOLERANCE:
647 		while (ancestor && !ancestor->power.set_latency_tolerance)
648 			ancestor = ancestor->parent;
649 
650 		break;
651 	default:
652 		ancestor = NULL;
653 	}
654 	if (ancestor)
655 		ret = dev_pm_qos_add_request(ancestor, req, type, value);
656 
657 	if (ret < 0)
658 		req->dev = NULL;
659 
660 	return ret;
661 }
662 EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
663 
664 static void __dev_pm_qos_drop_user_request(struct device *dev,
665 					   enum dev_pm_qos_req_type type)
666 {
667 	struct dev_pm_qos_request *req = NULL;
668 
669 	switch(type) {
670 	case DEV_PM_QOS_RESUME_LATENCY:
671 		req = dev->power.qos->resume_latency_req;
672 		dev->power.qos->resume_latency_req = NULL;
673 		break;
674 	case DEV_PM_QOS_LATENCY_TOLERANCE:
675 		req = dev->power.qos->latency_tolerance_req;
676 		dev->power.qos->latency_tolerance_req = NULL;
677 		break;
678 	case DEV_PM_QOS_FLAGS:
679 		req = dev->power.qos->flags_req;
680 		dev->power.qos->flags_req = NULL;
681 		break;
682 	default:
683 		WARN_ON(1);
684 		return;
685 	}
686 	__dev_pm_qos_remove_request(req);
687 	kfree(req);
688 }
689 
/* Lock-acquiring wrapper around __dev_pm_qos_drop_user_request(). */
static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}
697 
/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 *
 * Allocates a resume latency request owned by user space, adds it and creates
 * the corresponding sysfs attribute.  Fails with -EEXIST if the limit has
 * already been exposed for @dev.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	/* Negative resume latencies are invalid (see apply_constraint()). */
	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	/* Lock order: dev_pm_qos_sysfs_mtx before dev_pm_qos_mtx. */
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	/* The constraints object may have gone away or be exposed already. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	/* Roll the request back if the sysfs attribute cannot be created. */
	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
749 
750 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
751 {
752 	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
753 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
754 }
755 
/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 *
 * Removes the sysfs attribute first (under dev_pm_qos_sysfs_mtx), then drops
 * the backing user space request under dev_pm_qos_mtx.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
773 
/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 *
 * Allocates a flags request owned by user space, adds it and creates the
 * corresponding sysfs attribute.  Fails with -EEXIST if the flags have
 * already been exposed for @dev.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	/* Keep the device out of RPM_SUSPENDED while touching its flags. */
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	/* The constraints object may have gone away or be exposed already. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	/* Roll the request back if the sysfs attribute cannot be created. */
	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
827 
828 static void __dev_pm_qos_hide_flags(struct device *dev)
829 {
830 	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
831 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
832 }
833 
/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 *
 * Removes the sysfs attribute first (under dev_pm_qos_sysfs_mtx), then drops
 * the backing user space request under dev_pm_qos_mtx.  The device is kept
 * runtime-resumed while the flags request is being removed.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
853 
854 /**
855  * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
856  * @dev: Device to update the PM QoS flags request for.
857  * @mask: Flags to set/clear.
858  * @set: Whether to set or clear the flags (true means set).
859  */
860 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
861 {
862 	s32 value;
863 	int ret;
864 
865 	pm_runtime_get_sync(dev);
866 	mutex_lock(&dev_pm_qos_mtx);
867 
868 	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
869 		ret = -EINVAL;
870 		goto out;
871 	}
872 
873 	value = dev_pm_qos_requested_flags(dev);
874 	if (set)
875 		value |= mask;
876 	else
877 		value &= ~mask;
878 
879 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
880 
881  out:
882 	mutex_unlock(&dev_pm_qos_mtx);
883 	pm_runtime_put(dev);
884 	return ret;
885 }
886 
887 /**
888  * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
889  * @dev: Device to obtain the user space latency tolerance for.
890  */
891 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
892 {
893 	s32 ret;
894 
895 	mutex_lock(&dev_pm_qos_mtx);
896 	ret = IS_ERR_OR_NULL(dev->power.qos)
897 		|| !dev->power.qos->latency_tolerance_req ?
898 			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
899 			dev->power.qos->latency_tolerance_req->data.pnode.prio;
900 	mutex_unlock(&dev_pm_qos_mtx);
901 	return ret;
902 }
903 
/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 *
 * Creates the user space request on first use, updates it on subsequent
 * calls, and drops it again when @val is negative.
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			/* There is no request to disable, so this is a no-op
			 * for "no constraint" and an error otherwise. */
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		/* Record the request so later calls take the update path. */
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			/* Negative values drop the user space request. */
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
951 
952 /**
953  * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
954  * @dev: Device whose latency tolerance to expose
955  */
956 int dev_pm_qos_expose_latency_tolerance(struct device *dev)
957 {
958 	int ret;
959 
960 	if (!dev->power.set_latency_tolerance)
961 		return -EINVAL;
962 
963 	mutex_lock(&dev_pm_qos_sysfs_mtx);
964 	ret = pm_qos_sysfs_add_latency_tolerance(dev);
965 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
966 
967 	return ret;
968 }
969 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
970 
/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 *
 * Removes the sysfs attribute and then drops the user space request by
 * writing the "no constraint" value, with the device runtime-resumed.
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
988