1 /*
2  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
3  *	    for Non-CPU Devices.
4  *
5  * Copyright (C) 2011 Samsung Electronics
6  *	MyungJoo Ham <myungjoo.ham@samsung.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/kmod.h>
15 #include <linux/sched.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/init.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/stat.h>
22 #include <linux/pm_opp.h>
23 #include <linux/devfreq.h>
24 #include <linux/workqueue.h>
25 #include <linux/platform_device.h>
26 #include <linux/list.h>
27 #include <linux/printk.h>
28 #include <linux/hrtimer.h>
29 #include <linux/of.h>
30 #include "governor.h"
31 
32 static struct class *devfreq_class;
33 
34 /*
35  * devfreq core provides delayed work based load monitoring helper
36  * functions. Governors can use these or can implement their own
37  * monitoring mechanism.
38  */
39 static struct workqueue_struct *devfreq_wq;
40 
41 /* The list of all devfreq governors */
42 static LIST_HEAD(devfreq_governor_list);
43 /* The list of all devfreq devices */
44 static LIST_HEAD(devfreq_list);
45 static DEFINE_MUTEX(devfreq_list_lock);
46 
47 /**
48  * find_device_devfreq() - find devfreq struct using device pointer
49  * @dev:	device pointer used to lookup device devfreq.
50  *
51  * Search the list of device devfreqs and return the matched device's
52  * devfreq info. devfreq_list_lock should be held by the caller.
53  */
54 static struct devfreq *find_device_devfreq(struct device *dev)
55 {
56 	struct devfreq *tmp_devfreq;
57 
58 	if (IS_ERR_OR_NULL(dev)) {
59 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
60 		return ERR_PTR(-EINVAL);
61 	}
62 	WARN(!mutex_is_locked(&devfreq_list_lock),
63 	     "devfreq_list_lock must be locked.");
64 
65 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
66 		if (tmp_devfreq->dev.parent == dev)
67 			return tmp_devfreq;
68 	}
69 
70 	return ERR_PTR(-ENODEV);
71 }
72 
73 static unsigned long find_available_min_freq(struct devfreq *devfreq)
74 {
75 	struct dev_pm_opp *opp;
76 	unsigned long min_freq = 0;
77 
78 	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
79 	if (IS_ERR(opp))
80 		min_freq = 0;
81 	else
82 		dev_pm_opp_put(opp);
83 
84 	return min_freq;
85 }
86 
87 static unsigned long find_available_max_freq(struct devfreq *devfreq)
88 {
89 	struct dev_pm_opp *opp;
90 	unsigned long max_freq = ULONG_MAX;
91 
92 	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
93 	if (IS_ERR(opp))
94 		max_freq = 0;
95 	else
96 		dev_pm_opp_put(opp);
97 
98 	return max_freq;
99 }
100 
101 /**
102  * devfreq_get_freq_level() - Lookup freq_table for the frequency
103  * @devfreq:	the devfreq instance
104  * @freq:	the target frequency
105  */
106 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
107 {
108 	int lev;
109 
110 	for (lev = 0; lev < devfreq->profile->max_state; lev++)
111 		if (freq == devfreq->profile->freq_table[lev])
112 			return lev;
113 
114 	return -EINVAL;
115 }
116 
117 static int set_freq_table(struct devfreq *devfreq)
118 {
119 	struct devfreq_dev_profile *profile = devfreq->profile;
120 	struct dev_pm_opp *opp;
121 	unsigned long freq;
122 	int i, count;
123 
124 	/* Initialize the freq_table from OPP table */
125 	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
126 	if (count <= 0)
127 		return -EINVAL;
128 
129 	profile->max_state = count;
130 	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
131 					profile->max_state,
132 					sizeof(*profile->freq_table),
133 					GFP_KERNEL);
134 	if (!profile->freq_table) {
135 		profile->max_state = 0;
136 		return -ENOMEM;
137 	}
138 
139 	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
140 		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
141 		if (IS_ERR(opp)) {
142 			devm_kfree(devfreq->dev.parent, profile->freq_table);
143 			profile->max_state = 0;
144 			return PTR_ERR(opp);
145 		}
146 		dev_pm_opp_put(opp);
147 		profile->freq_table[i] = freq;
148 	}
149 
150 	return 0;
151 }
152 
153 /**
154  * devfreq_update_status() - Update statistics of devfreq behavior
155  * @devfreq:	the devfreq instance
156  * @freq:	the update target frequency
157  */
158 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
159 {
160 	int lev, prev_lev, ret = 0;
161 	unsigned long cur_time;
162 
163 	cur_time = jiffies;
164 
165 	/* Immediately exit if previous_freq is not initialized yet. */
166 	if (!devfreq->previous_freq)
167 		goto out;
168 
169 	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
170 	if (prev_lev < 0) {
171 		ret = prev_lev;
172 		goto out;
173 	}
174 
175 	devfreq->time_in_state[prev_lev] +=
176 			 cur_time - devfreq->last_stat_updated;
177 
178 	lev = devfreq_get_freq_level(devfreq, freq);
179 	if (lev < 0) {
180 		ret = lev;
181 		goto out;
182 	}
183 
184 	if (lev != prev_lev) {
185 		devfreq->trans_table[(prev_lev *
186 				devfreq->profile->max_state) + lev]++;
187 		devfreq->total_trans++;
188 	}
189 
190 out:
191 	devfreq->last_stat_updated = cur_time;
192 	return ret;
193 }
194 EXPORT_SYMBOL(devfreq_update_status);
195 
196 /**
197  * find_devfreq_governor() - find devfreq governor from name
198  * @name:	name of the governor
199  *
200  * Search the list of devfreq governors and return the matched
201  * governor's pointer. devfreq_list_lock should be held by the caller.
202  */
203 static struct devfreq_governor *find_devfreq_governor(const char *name)
204 {
205 	struct devfreq_governor *tmp_governor;
206 
207 	if (IS_ERR_OR_NULL(name)) {
208 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
209 		return ERR_PTR(-EINVAL);
210 	}
211 	WARN(!mutex_is_locked(&devfreq_list_lock),
212 	     "devfreq_list_lock must be locked.");
213 
214 	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
215 		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
216 			return tmp_governor;
217 	}
218 
219 	return ERR_PTR(-ENODEV);
220 }
221 
222 /**
223  * try_then_request_governor() - Try to find the governor and request the
224  *                               module if it is not found.
225  * @name:	name of the governor
226  *
227  * Search the list of devfreq governors and request the module and try again
228  * if it is not found. This can happen when both drivers (the governor driver
229  * and the driver that calls devfreq_add_device) are built as modules.
230  * devfreq_list_lock should be held by the caller. Returns the matched
231  * governor's pointer.
232  */
233 static struct devfreq_governor *try_then_request_governor(const char *name)
234 {
235 	struct devfreq_governor *governor;
236 	int err = 0;
237 
238 	if (IS_ERR_OR_NULL(name)) {
239 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
240 		return ERR_PTR(-EINVAL);
241 	}
242 	WARN(!mutex_is_locked(&devfreq_list_lock),
243 	     "devfreq_list_lock must be locked.");
244 
245 	governor = find_devfreq_governor(name);
246 	if (IS_ERR(governor)) {
247 		mutex_unlock(&devfreq_list_lock);
248 
249 		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
250 			     DEVFREQ_NAME_LEN))
251 			err = request_module("governor_%s", "simpleondemand");
252 		else
253 			err = request_module("governor_%s", name);
254 		/* Restore previous state before return */
255 		mutex_lock(&devfreq_list_lock);
256 		if (err)
257 			return NULL;
258 
259 		governor = find_devfreq_governor(name);
260 	}
261 
262 	return governor;
263 }
264 
265 static int devfreq_notify_transition(struct devfreq *devfreq,
266 		struct devfreq_freqs *freqs, unsigned int state)
267 {
268 	if (!devfreq)
269 		return -EINVAL;
270 
271 	switch (state) {
272 	case DEVFREQ_PRECHANGE:
273 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
274 				DEVFREQ_PRECHANGE, freqs);
275 		break;
276 
277 	case DEVFREQ_POSTCHANGE:
278 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
279 				DEVFREQ_POSTCHANGE, freqs);
280 		break;
281 	default:
282 		return -EINVAL;
283 	}
284 
285 	return 0;
286 }
287 
288 /* Load monitoring helper functions for governors' use */
289 
290 /**
291  * update_devfreq() - Reevaluate the device and configure frequency.
292  * @devfreq:	the devfreq instance.
293  *
294  * Note: Lock devfreq->lock before calling update_devfreq
295  *	 This function is exported for governors.
296  */
297 int update_devfreq(struct devfreq *devfreq)
298 {
299 	struct devfreq_freqs freqs;
300 	unsigned long freq, cur_freq, min_freq, max_freq;
301 	int err = 0;
302 	u32 flags = 0;
303 
304 	if (!mutex_is_locked(&devfreq->lock)) {
305 		WARN(true, "devfreq->lock must be locked by the caller.\n");
306 		return -EINVAL;
307 	}
308 
309 	if (!devfreq->governor)
310 		return -EINVAL;
311 
312 	/* Reevaluate the proper frequency */
313 	err = devfreq->governor->get_target_freq(devfreq, &freq);
314 	if (err)
315 		return err;
316 
317 	/*
318 	 * Adjust the frequency with user freq, QoS and available freq.
319 	 *
320 	 * List from the highest priority
321 	 * max_freq
322 	 * min_freq
323 	 */
324 	max_freq = min(devfreq->scaling_max_freq, devfreq->max_freq);
325 	min_freq = max(devfreq->scaling_min_freq, devfreq->min_freq);
326 
327 	if (freq < min_freq) {
328 		freq = min_freq;
329 		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
330 	}
331 	if (freq > max_freq) {
332 		freq = max_freq;
333 		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
334 	}
335 
336 	if (devfreq->profile->get_cur_freq)
337 		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
338 	else
339 		cur_freq = devfreq->previous_freq;
340 
341 	freqs.old = cur_freq;
342 	freqs.new = freq;
343 	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
344 
345 	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
346 	if (err) {
347 		freqs.new = cur_freq;
348 		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
349 		return err;
350 	}
351 
352 	freqs.new = freq;
353 	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
354 
355 	if (devfreq_update_status(devfreq, freq))
356 		dev_err(&devfreq->dev,
357 			"Couldn't update frequency transition information.\n");
358 
359 	devfreq->previous_freq = freq;
360 	return err;
361 }
362 EXPORT_SYMBOL(update_devfreq);
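
/*
 * Illustrative example (hypothetical governor helper): a governor that
 * wants an immediate re-evaluation, e.g. after one of its tunables changed,
 * must take devfreq->lock around the call, as required above.
 *
 *	static int example_gov_retrigger(struct devfreq *devfreq)
 *	{
 *		int err;
 *
 *		mutex_lock(&devfreq->lock);
 *		err = update_devfreq(devfreq);
 *		mutex_unlock(&devfreq->lock);
 *
 *		return err;
 *	}
 */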
363 
364 /**
365  * devfreq_monitor() - Periodically poll devfreq objects.
366  * @work:	the work struct used to run devfreq_monitor periodically.
367  *
368  */
369 static void devfreq_monitor(struct work_struct *work)
370 {
371 	int err;
372 	struct devfreq *devfreq = container_of(work,
373 					struct devfreq, work.work);
374 
375 	mutex_lock(&devfreq->lock);
376 	err = update_devfreq(devfreq);
377 	if (err)
378 		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
379 
380 	queue_delayed_work(devfreq_wq, &devfreq->work,
381 				msecs_to_jiffies(devfreq->profile->polling_ms));
382 	mutex_unlock(&devfreq->lock);
383 }
384 
385 /**
386  * devfreq_monitor_start() - Start load monitoring of devfreq instance
387  * @devfreq:	the devfreq instance.
388  *
389  * Helper function for starting devfreq device load monitoring. By
390  * default delayed work based monitoring is supported. Function
391  * to be called from governor in response to DEVFREQ_GOV_START
392  * event when device is added to devfreq framework.
393  */
394 void devfreq_monitor_start(struct devfreq *devfreq)
395 {
396 	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
397 	if (devfreq->profile->polling_ms)
398 		queue_delayed_work(devfreq_wq, &devfreq->work,
399 			msecs_to_jiffies(devfreq->profile->polling_ms));
400 }
401 EXPORT_SYMBOL(devfreq_monitor_start);
402 
403 /**
404  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
405  * @devfreq:	the devfreq instance.
406  *
407  * Helper function to stop devfreq device load monitoring. Function
408  * to be called from governor in response to DEVFREQ_GOV_STOP
409  * event when device is removed from devfreq framework.
410  */
411 void devfreq_monitor_stop(struct devfreq *devfreq)
412 {
413 	cancel_delayed_work_sync(&devfreq->work);
414 }
415 EXPORT_SYMBOL(devfreq_monitor_stop);
416 
417 /**
418  * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
419  * @devfreq:	the devfreq instance.
420  *
421  * Helper function to suspend devfreq device load monitoring. Function
422  * to be called from governor in response to DEVFREQ_GOV_SUSPEND
423  * event or when polling interval is set to zero.
424  *
425  * Note: Though this function is the same as devfreq_monitor_stop(),
426  * intentionally kept separate to provide hooks for collecting
427  * transition statistics.
428  */
429 void devfreq_monitor_suspend(struct devfreq *devfreq)
430 {
431 	mutex_lock(&devfreq->lock);
432 	if (devfreq->stop_polling) {
433 		mutex_unlock(&devfreq->lock);
434 		return;
435 	}
436 
437 	devfreq_update_status(devfreq, devfreq->previous_freq);
438 	devfreq->stop_polling = true;
439 	mutex_unlock(&devfreq->lock);
440 	cancel_delayed_work_sync(&devfreq->work);
441 }
442 EXPORT_SYMBOL(devfreq_monitor_suspend);
443 
444 /**
445  * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
446  * @devfreq:    the devfreq instance.
447  *
448  * Helper function to resume devfreq device load monitoring. Function
449  * to be called from governor in response to DEVFREQ_GOV_RESUME
450  * event or when polling interval is set to non-zero.
451  */
452 void devfreq_monitor_resume(struct devfreq *devfreq)
453 {
454 	unsigned long freq;
455 
456 	mutex_lock(&devfreq->lock);
457 	if (!devfreq->stop_polling)
458 		goto out;
459 
460 	if (!delayed_work_pending(&devfreq->work) &&
461 			devfreq->profile->polling_ms)
462 		queue_delayed_work(devfreq_wq, &devfreq->work,
463 			msecs_to_jiffies(devfreq->profile->polling_ms));
464 
465 	devfreq->last_stat_updated = jiffies;
466 	devfreq->stop_polling = false;
467 
468 	if (devfreq->profile->get_cur_freq &&
469 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
470 		devfreq->previous_freq = freq;
471 
472 out:
473 	mutex_unlock(&devfreq->lock);
474 }
475 EXPORT_SYMBOL(devfreq_monitor_resume);
476 
477 /**
478  * devfreq_interval_update() - Update device devfreq monitoring interval
479  * @devfreq:    the devfreq instance.
480  * @delay:      new polling interval to be set.
481  *
482  * Helper function to set new load monitoring polling interval. Function
483  * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
484  */
485 void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
486 {
487 	unsigned int cur_delay = devfreq->profile->polling_ms;
488 	unsigned int new_delay = *delay;
489 
490 	mutex_lock(&devfreq->lock);
491 	devfreq->profile->polling_ms = new_delay;
492 
493 	if (devfreq->stop_polling)
494 		goto out;
495 
496 	/* if new delay is zero, stop polling */
497 	if (!new_delay) {
498 		mutex_unlock(&devfreq->lock);
499 		cancel_delayed_work_sync(&devfreq->work);
500 		return;
501 	}
502 
503 	/* if current delay is zero, start polling with new delay */
504 	if (!cur_delay) {
505 		queue_delayed_work(devfreq_wq, &devfreq->work,
506 			msecs_to_jiffies(devfreq->profile->polling_ms));
507 		goto out;
508 	}
509 
510 	/* if current delay is greater than new delay, restart polling */
511 	if (cur_delay > new_delay) {
512 		mutex_unlock(&devfreq->lock);
513 		cancel_delayed_work_sync(&devfreq->work);
514 		mutex_lock(&devfreq->lock);
515 		if (!devfreq->stop_polling)
516 			queue_delayed_work(devfreq_wq, &devfreq->work,
517 			      msecs_to_jiffies(devfreq->profile->polling_ms));
518 	}
519 out:
520 	mutex_unlock(&devfreq->lock);
521 }
522 EXPORT_SYMBOL(devfreq_interval_update);
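
/*
 * Illustrative example (hypothetical governor, following the pattern of the
 * simple_ondemand governor, which includes "governor.h"): a polling governor
 * maps the DEVFREQ_GOV_* events onto the monitor helpers above from its
 * event_handler callback.
 *
 *	static int example_gov_event_handler(struct devfreq *devfreq,
 *					     unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		return 0;
 *	}
 */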
523 
524 /**
525  * devfreq_notifier_call() - Notify that the device frequency requirements
526  *			   have been changed outside of the devfreq framework.
527  * @nb:		the notifier_block (supposed to be devfreq->nb)
528  * @type:	not used
529  * @devp:	not used
530  *
531  * Called by a notifier that uses devfreq->nb.
532  */
533 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
534 				 void *devp)
535 {
536 	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
537 	int ret;
538 
539 	mutex_lock(&devfreq->lock);
540 
541 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
542 	if (!devfreq->scaling_min_freq) {
543 		mutex_unlock(&devfreq->lock);
544 		return -EINVAL;
545 	}
546 
547 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
548 	if (!devfreq->scaling_max_freq) {
549 		mutex_unlock(&devfreq->lock);
550 		return -EINVAL;
551 	}
552 
553 	ret = update_devfreq(devfreq);
554 	mutex_unlock(&devfreq->lock);
555 
556 	return ret;
557 }
558 
559 /**
560  * devfreq_dev_release() - Callback for struct device to release the device.
561  * @dev:	the devfreq device
562  *
563  * Remove devfreq from the list and release its resources.
564  */
565 static void devfreq_dev_release(struct device *dev)
566 {
567 	struct devfreq *devfreq = to_devfreq(dev);
568 
569 	mutex_lock(&devfreq_list_lock);
570 	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
571 		mutex_unlock(&devfreq_list_lock);
572 		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
573 		return;
574 	}
575 	list_del(&devfreq->node);
576 	mutex_unlock(&devfreq_list_lock);
577 
578 	if (devfreq->profile->exit)
579 		devfreq->profile->exit(devfreq->dev.parent);
580 
581 	mutex_destroy(&devfreq->lock);
582 	kfree(devfreq);
583 }
584 
585 /**
586  * devfreq_add_device() - Add devfreq feature to the device
587  * @dev:	the device to add devfreq feature.
588  * @profile:	device-specific profile to run devfreq.
589  * @governor_name:	name of the policy to choose frequency.
590  * @data:	private data for the governor. The devfreq framework does not
591  *		touch this value.
592  */
593 struct devfreq *devfreq_add_device(struct device *dev,
594 				   struct devfreq_dev_profile *profile,
595 				   const char *governor_name,
596 				   void *data)
597 {
598 	struct devfreq *devfreq;
599 	struct devfreq_governor *governor;
600 	static atomic_t devfreq_no = ATOMIC_INIT(-1);
601 	int err = 0;
602 
603 	if (!dev || !profile || !governor_name) {
604 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
605 		return ERR_PTR(-EINVAL);
606 	}
607 
608 	mutex_lock(&devfreq_list_lock);
609 	devfreq = find_device_devfreq(dev);
610 	mutex_unlock(&devfreq_list_lock);
611 	if (!IS_ERR(devfreq)) {
612 		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
613 			__func__);
614 		err = -EINVAL;
615 		goto err_out;
616 	}
617 
618 	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
619 	if (!devfreq) {
620 		err = -ENOMEM;
621 		goto err_out;
622 	}
623 
624 	mutex_init(&devfreq->lock);
625 	mutex_lock(&devfreq->lock);
626 	devfreq->dev.parent = dev;
627 	devfreq->dev.class = devfreq_class;
628 	devfreq->dev.release = devfreq_dev_release;
629 	devfreq->profile = profile;
630 	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
631 	devfreq->previous_freq = profile->initial_freq;
632 	devfreq->last_status.current_frequency = profile->initial_freq;
633 	devfreq->data = data;
634 	devfreq->nb.notifier_call = devfreq_notifier_call;
635 
636 	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
637 		mutex_unlock(&devfreq->lock);
638 		err = set_freq_table(devfreq);
639 		if (err < 0)
640 			goto err_out;
641 		mutex_lock(&devfreq->lock);
642 	}
643 
644 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
645 	if (!devfreq->scaling_min_freq) {
646 		mutex_unlock(&devfreq->lock);
647 		err = -EINVAL;
648 		goto err_dev;
649 	}
650 	devfreq->min_freq = devfreq->scaling_min_freq;
651 
652 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
653 	if (!devfreq->scaling_max_freq) {
654 		mutex_unlock(&devfreq->lock);
655 		err = -EINVAL;
656 		goto err_dev;
657 	}
658 	devfreq->max_freq = devfreq->scaling_max_freq;
659 
660 	dev_set_name(&devfreq->dev, "devfreq%d",
661 				atomic_inc_return(&devfreq_no));
662 	err = device_register(&devfreq->dev);
663 	if (err) {
664 		mutex_unlock(&devfreq->lock);
665 		put_device(&devfreq->dev);
666 		goto err_out;
667 	}
668 
669 	devfreq->trans_table =
670 		devm_kzalloc(&devfreq->dev,
671 			     array3_size(sizeof(unsigned int),
672 					 devfreq->profile->max_state,
673 					 devfreq->profile->max_state),
674 			     GFP_KERNEL);
675 	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
676 						devfreq->profile->max_state,
677 						sizeof(unsigned long),
678 						GFP_KERNEL);
679 	devfreq->last_stat_updated = jiffies;
680 
681 	srcu_init_notifier_head(&devfreq->transition_notifier_list);
682 
683 	mutex_unlock(&devfreq->lock);
684 
685 	mutex_lock(&devfreq_list_lock);
686 
687 	governor = try_then_request_governor(devfreq->governor_name);
688 	if (IS_ERR(governor)) {
689 		dev_err(dev, "%s: Unable to find governor for the device\n",
690 			__func__);
691 		err = PTR_ERR(governor);
692 		goto err_init;
693 	}
694 
695 	devfreq->governor = governor;
696 	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
697 						NULL);
698 	if (err) {
699 		dev_err(dev, "%s: Unable to start governor for the device\n",
700 			__func__);
701 		goto err_init;
702 	}
703 
704 	list_add(&devfreq->node, &devfreq_list);
705 
706 	mutex_unlock(&devfreq_list_lock);
707 
708 	return devfreq;
709 
710 err_init:
711 	mutex_unlock(&devfreq_list_lock);
712 
713 	devfreq_remove_device(devfreq);
714 	devfreq = NULL;
715 err_dev:
716 	kfree(devfreq);
717 err_out:
718 	return ERR_PTR(err);
719 }
720 EXPORT_SYMBOL(devfreq_add_device);
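
/*
 * Illustrative example (hypothetical profile and callbacks): a device
 * driver fills a devfreq_dev_profile and registers it from probe(), after
 * its OPP table is available.
 *
 *	static struct devfreq_dev_profile example_profile = {
 *		.initial_freq	= 400000000,
 *		.polling_ms	= 100,
 *		.target		= example_target,
 *		.get_dev_status	= example_get_dev_status,
 *	};
 *
 *	// in probe():
 *	struct devfreq *devfreq;
 *
 *	devfreq = devfreq_add_device(dev, &example_profile,
 *				     DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
 *	if (IS_ERR(devfreq))
 *		return PTR_ERR(devfreq);
 */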
721 
722 /**
723  * devfreq_remove_device() - Remove devfreq feature from a device.
724  * @devfreq:	the devfreq instance to be removed
725  *
726  * The opposite of devfreq_add_device().
727  */
728 int devfreq_remove_device(struct devfreq *devfreq)
729 {
730 	if (!devfreq)
731 		return -EINVAL;
732 
733 	if (devfreq->governor)
734 		devfreq->governor->event_handler(devfreq,
735 						 DEVFREQ_GOV_STOP, NULL);
736 	device_unregister(&devfreq->dev);
737 
738 	return 0;
739 }
740 EXPORT_SYMBOL(devfreq_remove_device);
741 
742 static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
743 {
744 	struct devfreq **r = res;
745 
746 	if (WARN_ON(!r || !*r))
747 		return 0;
748 
749 	return *r == data;
750 }
751 
752 static void devm_devfreq_dev_release(struct device *dev, void *res)
753 {
754 	devfreq_remove_device(*(struct devfreq **)res);
755 }
756 
757 /**
758  * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
759  * @dev:	the device to add devfreq feature.
760  * @profile:	device-specific profile to run devfreq.
761  * @governor_name:	name of the policy to choose frequency.
762  * @data:	private data for the governor. The devfreq framework does not
763  *		touch this value.
764  *
765  * This function automatically manages the memory of the devfreq device
766  * using device resource management, which simplifies freeing the memory of
767  * the devfreq device.
768  */
769 struct devfreq *devm_devfreq_add_device(struct device *dev,
770 					struct devfreq_dev_profile *profile,
771 					const char *governor_name,
772 					void *data)
773 {
774 	struct devfreq **ptr, *devfreq;
775 
776 	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
777 	if (!ptr)
778 		return ERR_PTR(-ENOMEM);
779 
780 	devfreq = devfreq_add_device(dev, profile, governor_name, data);
781 	if (IS_ERR(devfreq)) {
782 		devres_free(ptr);
783 		return devfreq;
784 	}
785 
786 	*ptr = devfreq;
787 	devres_add(dev, ptr);
788 
789 	return devfreq;
790 }
791 EXPORT_SYMBOL(devm_devfreq_add_device);
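
/*
 * Illustrative usage (hypothetical, reusing example_profile from the sketch
 * above): with the resource-managed variant the driver does not need an
 * explicit devfreq_remove_device() in its remove path, since the devres
 * core drops the instance when the parent device is unbound.
 *
 *	devfreq = devm_devfreq_add_device(dev, &example_profile,
 *					  DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
 *	if (IS_ERR(devfreq))
 *		return PTR_ERR(devfreq);
 */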
792 
793 #ifdef CONFIG_OF
794 /*
795  * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
796  * @dev - instance of the given device
797  * @index - index into the list of devfreq phandles
798  *
799  * Return the instance of the devfreq device
800  */
801 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
802 {
803 	struct device_node *node;
804 	struct devfreq *devfreq;
805 
806 	if (!dev)
807 		return ERR_PTR(-EINVAL);
808 
809 	if (!dev->of_node)
810 		return ERR_PTR(-EINVAL);
811 
812 	node = of_parse_phandle(dev->of_node, "devfreq", index);
813 	if (!node)
814 		return ERR_PTR(-ENODEV);
815 
816 	mutex_lock(&devfreq_list_lock);
817 	list_for_each_entry(devfreq, &devfreq_list, node) {
818 		if (devfreq->dev.parent
819 			&& devfreq->dev.parent->of_node == node) {
820 			mutex_unlock(&devfreq_list_lock);
821 			of_node_put(node);
822 			return devfreq;
823 		}
824 	}
825 	mutex_unlock(&devfreq_list_lock);
826 	of_node_put(node);
827 
828 	return ERR_PTR(-EPROBE_DEFER);
829 }
830 #else
831 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
832 {
833 	return ERR_PTR(-ENODEV);
834 }
835 #endif /* CONFIG_OF */
836 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
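
/*
 * Illustrative example (hypothetical devicetree fragment and label): a
 * consumer node carries a "devfreq" phandle property pointing at the parent
 * device of the wanted devfreq instance,
 *
 *	bus@12000000 {
 *		devfreq = <&dmc>;
 *	};
 *
 * and the consumer driver looks it up at probe time, deferring until the
 * devfreq instance has actually been registered:
 *
 *	parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
 *	if (IS_ERR(parent_devfreq))
 *		return PTR_ERR(parent_devfreq);	// may be -EPROBE_DEFER
 */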
837 
838 /**
839  * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
840  * @dev:	the device from which to remove the devfreq feature.
841  * @devfreq:	the devfreq instance to be removed
842  */
843 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
844 {
845 	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
846 			       devm_devfreq_dev_match, devfreq));
847 }
848 EXPORT_SYMBOL(devm_devfreq_remove_device);
849 
850 /**
851  * devfreq_suspend_device() - Suspend devfreq of a device.
852  * @devfreq: the devfreq instance to be suspended
853  *
854  * This function is intended to be called by the pm callbacks
855  * (e.g., runtime_suspend, suspend) of the device driver that
856  * holds the devfreq.
857  */
858 int devfreq_suspend_device(struct devfreq *devfreq)
859 {
860 	if (!devfreq)
861 		return -EINVAL;
862 
863 	if (!devfreq->governor)
864 		return 0;
865 
866 	return devfreq->governor->event_handler(devfreq,
867 				DEVFREQ_GOV_SUSPEND, NULL);
868 }
869 EXPORT_SYMBOL(devfreq_suspend_device);
870 
871 /**
872  * devfreq_resume_device() - Resume devfreq of a device.
873  * @devfreq: the devfreq instance to be resumed
874  *
875  * This function is intended to be called by the pm callbacks
876  * (e.g., runtime_resume, resume) of the device driver that
877  * holds the devfreq.
878  */
879 int devfreq_resume_device(struct devfreq *devfreq)
880 {
881 	if (!devfreq)
882 		return -EINVAL;
883 
884 	if (!devfreq->governor)
885 		return 0;
886 
887 	return devfreq->governor->event_handler(devfreq,
888 				DEVFREQ_GOV_RESUME, NULL);
889 }
890 EXPORT_SYMBOL(devfreq_resume_device);
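
/*
 * Illustrative example (hypothetical driver data and callbacks): the driver
 * that owns the devfreq instance typically forwards its own PM transitions
 * from its runtime PM (or system PM) callbacks.
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct example_drvdata *priv = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(priv->devfreq);
 *	}
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		struct example_drvdata *priv = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(priv->devfreq);
 *	}
 */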
891 
892 /**
893  * devfreq_add_governor() - Add devfreq governor
894  * @governor:	the devfreq governor to be added
895  */
896 int devfreq_add_governor(struct devfreq_governor *governor)
897 {
898 	struct devfreq_governor *g;
899 	struct devfreq *devfreq;
900 	int err = 0;
901 
902 	if (!governor) {
903 		pr_err("%s: Invalid parameters.\n", __func__);
904 		return -EINVAL;
905 	}
906 
907 	mutex_lock(&devfreq_list_lock);
908 	g = find_devfreq_governor(governor->name);
909 	if (!IS_ERR(g)) {
910 		pr_err("%s: governor %s already registered\n", __func__,
911 		       g->name);
912 		err = -EINVAL;
913 		goto err_out;
914 	}
915 
916 	list_add(&governor->node, &devfreq_governor_list);
917 
918 	list_for_each_entry(devfreq, &devfreq_list, node) {
919 		int ret = 0;
920 		struct device *dev = devfreq->dev.parent;
921 
922 		if (!strncmp(devfreq->governor_name, governor->name,
923 			     DEVFREQ_NAME_LEN)) {
924 			/* The following should never occur */
925 			if (devfreq->governor) {
926 				dev_warn(dev,
927 					 "%s: Governor %s already present\n",
928 					 __func__, devfreq->governor->name);
929 				ret = devfreq->governor->event_handler(devfreq,
930 							DEVFREQ_GOV_STOP, NULL);
931 				if (ret) {
932 					dev_warn(dev,
933 						 "%s: Governor %s stop = %d\n",
934 						 __func__,
935 						 devfreq->governor->name, ret);
936 				}
937 				/* Fall through */
938 			}
939 			devfreq->governor = governor;
940 			ret = devfreq->governor->event_handler(devfreq,
941 						DEVFREQ_GOV_START, NULL);
942 			if (ret) {
943 				dev_warn(dev, "%s: Governor %s start=%d\n",
944 					 __func__, devfreq->governor->name,
945 					 ret);
946 			}
947 		}
948 	}
949 
950 err_out:
951 	mutex_unlock(&devfreq_list_lock);
952 
953 	return err;
954 }
955 EXPORT_SYMBOL(devfreq_add_governor);
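
/*
 * Illustrative example (hypothetical governor, assuming the usual module
 * boilerplate and "governor.h"): an out-of-core governor registers itself
 * through the helper above and unregisters on module exit; the callbacks
 * are the kind sketched earlier in this file.
 *
 *	static struct devfreq_governor example_governor = {
 *		.name		 = "example",
 *		.get_target_freq = example_gov_get_target_freq,
 *		.event_handler	 = example_gov_event_handler,
 *	};
 *
 *	static int __init example_gov_init(void)
 *	{
 *		return devfreq_add_governor(&example_governor);
 *	}
 *	module_init(example_gov_init);
 *
 *	static void __exit example_gov_exit(void)
 *	{
 *		int ret = devfreq_remove_governor(&example_governor);
 *
 *		if (ret)
 *			pr_err("%s: failed to remove governor (%d)\n",
 *			       __func__, ret);
 *	}
 *	module_exit(example_gov_exit);
 */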
956 
957 /**
958  * devfreq_remove_governor() - Remove a devfreq governor.
959  * @governor:	the devfreq governor to be removed
960  */
961 int devfreq_remove_governor(struct devfreq_governor *governor)
962 {
963 	struct devfreq_governor *g;
964 	struct devfreq *devfreq;
965 	int err = 0;
966 
967 	if (!governor) {
968 		pr_err("%s: Invalid parameters.\n", __func__);
969 		return -EINVAL;
970 	}
971 
972 	mutex_lock(&devfreq_list_lock);
973 	g = find_devfreq_governor(governor->name);
974 	if (IS_ERR(g)) {
975 		pr_err("%s: governor %s not registered\n", __func__,
976 		       governor->name);
977 		err = PTR_ERR(g);
978 		goto err_out;
979 	}
980 	list_for_each_entry(devfreq, &devfreq_list, node) {
981 		int ret;
982 		struct device *dev = devfreq->dev.parent;
983 
984 		if (!strncmp(devfreq->governor_name, governor->name,
985 			     DEVFREQ_NAME_LEN)) {
986 			/* we should have a devfreq governor! */
987 			if (!devfreq->governor) {
988 				dev_warn(dev, "%s: Governor %s NOT present\n",
989 					 __func__, governor->name);
990 				continue;
991 				/* Fall through */
992 			}
993 			ret = devfreq->governor->event_handler(devfreq,
994 						DEVFREQ_GOV_STOP, NULL);
995 			if (ret) {
996 				dev_warn(dev, "%s: Governor %s stop=%d\n",
997 					 __func__, devfreq->governor->name,
998 					 ret);
999 			}
1000 			devfreq->governor = NULL;
1001 		}
1002 	}
1003 
1004 	list_del(&governor->node);
1005 err_out:
1006 	mutex_unlock(&devfreq_list_lock);
1007 
1008 	return err;
1009 }
1010 EXPORT_SYMBOL(devfreq_remove_governor);
1011 
1012 static ssize_t governor_show(struct device *dev,
1013 			     struct device_attribute *attr, char *buf)
1014 {
1015 	if (!to_devfreq(dev)->governor)
1016 		return -EINVAL;
1017 
1018 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1019 }
1020 
1021 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1022 			      const char *buf, size_t count)
1023 {
1024 	struct devfreq *df = to_devfreq(dev);
1025 	int ret;
1026 	char str_governor[DEVFREQ_NAME_LEN + 1];
1027 	struct devfreq_governor *governor;
1028 
1029 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1030 	if (ret != 1)
1031 		return -EINVAL;
1032 
1033 	mutex_lock(&devfreq_list_lock);
1034 	governor = try_then_request_governor(str_governor);
1035 	if (IS_ERR(governor)) {
1036 		ret = PTR_ERR(governor);
1037 		goto out;
1038 	}
1039 	if (df->governor == governor) {
1040 		ret = 0;
1041 		goto out;
1042 	} else if ((df->governor && df->governor->immutable) ||
1043 					governor->immutable) {
1044 		ret = -EINVAL;
1045 		goto out;
1046 	}
1047 
1048 	if (df->governor) {
1049 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1050 		if (ret) {
1051 			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1052 				 __func__, df->governor->name, ret);
1053 			goto out;
1054 		}
1055 	}
1056 	df->governor = governor;
1057 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1058 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1059 	if (ret)
1060 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
1061 			 __func__, df->governor->name, ret);
1062 out:
1063 	mutex_unlock(&devfreq_list_lock);
1064 
1065 	if (!ret)
1066 		ret = count;
1067 	return ret;
1068 }
1069 static DEVICE_ATTR_RW(governor);
1070 
1071 static ssize_t available_governors_show(struct device *d,
1072 					struct device_attribute *attr,
1073 					char *buf)
1074 {
1075 	struct devfreq *df = to_devfreq(d);
1076 	ssize_t count = 0;
1077 
1078 	mutex_lock(&devfreq_list_lock);
1079 
1080 	/*
1081 	 * A devfreq device with an immutable governor (e.g., passive) shows
1082 	 * only its own governor.
1083 	 */
1084 	if (df->governor->immutable) {
1085 		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1086 				   "%s ", df->governor_name);
1087 	/*
1088 	 * Otherwise, the devfreq device shows the registered governors except
1089 	 * for immutable governors such as the passive governor.
1090 	 */
1091 	} else {
1092 		struct devfreq_governor *governor;
1093 
1094 		list_for_each_entry(governor, &devfreq_governor_list, node) {
1095 			if (governor->immutable)
1096 				continue;
1097 			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1098 					   "%s ", governor->name);
1099 		}
1100 	}
1101 
1102 	mutex_unlock(&devfreq_list_lock);
1103 
1104 	/* Truncate the trailing space */
1105 	if (count)
1106 		count--;
1107 
1108 	count += sprintf(&buf[count], "\n");
1109 
1110 	return count;
1111 }
1112 static DEVICE_ATTR_RO(available_governors);
1113 
1114 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1115 			     char *buf)
1116 {
1117 	unsigned long freq;
1118 	struct devfreq *devfreq = to_devfreq(dev);
1119 
1120 	if (devfreq->profile->get_cur_freq &&
1121 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1122 		return sprintf(buf, "%lu\n", freq);
1123 
1124 	return sprintf(buf, "%lu\n", devfreq->previous_freq);
1125 }
1126 static DEVICE_ATTR_RO(cur_freq);
1127 
1128 static ssize_t target_freq_show(struct device *dev,
1129 				struct device_attribute *attr, char *buf)
1130 {
1131 	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1132 }
1133 static DEVICE_ATTR_RO(target_freq);
1134 
1135 static ssize_t polling_interval_show(struct device *dev,
1136 				     struct device_attribute *attr, char *buf)
1137 {
1138 	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1139 }
1140 
1141 static ssize_t polling_interval_store(struct device *dev,
1142 				      struct device_attribute *attr,
1143 				      const char *buf, size_t count)
1144 {
1145 	struct devfreq *df = to_devfreq(dev);
1146 	unsigned int value;
1147 	int ret;
1148 
1149 	if (!df->governor)
1150 		return -EINVAL;
1151 
1152 	ret = sscanf(buf, "%u", &value);
1153 	if (ret != 1)
1154 		return -EINVAL;
1155 
1156 	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1157 	ret = count;
1158 
1159 	return ret;
1160 }
1161 static DEVICE_ATTR_RW(polling_interval);
1162 
1163 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1164 			      const char *buf, size_t count)
1165 {
1166 	struct devfreq *df = to_devfreq(dev);
1167 	unsigned long value;
1168 	int ret;
1169 
1170 	ret = sscanf(buf, "%lu", &value);
1171 	if (ret != 1)
1172 		return -EINVAL;
1173 
1174 	mutex_lock(&df->lock);
1175 
1176 	if (value) {
1177 		if (value > df->max_freq) {
1178 			ret = -EINVAL;
1179 			goto unlock;
1180 		}
1181 	} else {
1182 		unsigned long *freq_table = df->profile->freq_table;
1183 
1184 		/* Get minimum frequency according to sorting order */
1185 		if (freq_table[0] < freq_table[df->profile->max_state - 1])
1186 			value = freq_table[0];
1187 		else
1188 			value = freq_table[df->profile->max_state - 1];
1189 	}
1190 
1191 	df->min_freq = value;
1192 	update_devfreq(df);
1193 	ret = count;
1194 unlock:
1195 	mutex_unlock(&df->lock);
1196 	return ret;
1197 }
1198 
1199 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1200 			     char *buf)
1201 {
1202 	struct devfreq *df = to_devfreq(dev);
1203 
1204 	return sprintf(buf, "%lu\n", max(df->scaling_min_freq, df->min_freq));
1205 }
1206 
1207 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1208 			      const char *buf, size_t count)
1209 {
1210 	struct devfreq *df = to_devfreq(dev);
1211 	unsigned long value;
1212 	int ret;
1213 
1214 	ret = sscanf(buf, "%lu", &value);
1215 	if (ret != 1)
1216 		return -EINVAL;
1217 
1218 	mutex_lock(&df->lock);
1219 
1220 	if (value) {
1221 		if (value < df->min_freq) {
1222 			ret = -EINVAL;
1223 			goto unlock;
1224 		}
1225 	} else {
1226 		unsigned long *freq_table = df->profile->freq_table;
1227 
1228 		/* Get maximum frequency according to sorting order */
1229 		if (freq_table[0] < freq_table[df->profile->max_state - 1])
1230 			value = freq_table[df->profile->max_state - 1];
1231 		else
1232 			value = freq_table[0];
1233 	}
1234 
1235 	df->max_freq = value;
1236 	update_devfreq(df);
1237 	ret = count;
1238 unlock:
1239 	mutex_unlock(&df->lock);
1240 	return ret;
1241 }
1242 static DEVICE_ATTR_RW(min_freq);
1243 
1244 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1245 			     char *buf)
1246 {
1247 	struct devfreq *df = to_devfreq(dev);
1248 
1249 	return sprintf(buf, "%lu\n", min(df->scaling_max_freq, df->max_freq));
1250 }
1251 static DEVICE_ATTR_RW(max_freq);
1252 
1253 static ssize_t available_frequencies_show(struct device *d,
1254 					  struct device_attribute *attr,
1255 					  char *buf)
1256 {
1257 	struct devfreq *df = to_devfreq(d);
1258 	ssize_t count = 0;
1259 	int i;
1260 
1261 	mutex_lock(&df->lock);
1262 
1263 	for (i = 0; i < df->profile->max_state; i++)
1264 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1265 				"%lu ", df->profile->freq_table[i]);
1266 
1267 	mutex_unlock(&df->lock);
1268 	/* Truncate the trailing space */
1269 	if (count)
1270 		count--;
1271 
1272 	count += sprintf(&buf[count], "\n");
1273 
1274 	return count;
1275 }
1276 static DEVICE_ATTR_RO(available_frequencies);
1277 
1278 static ssize_t trans_stat_show(struct device *dev,
1279 			       struct device_attribute *attr, char *buf)
1280 {
1281 	struct devfreq *devfreq = to_devfreq(dev);
1282 	ssize_t len;
1283 	int i, j;
1284 	unsigned int max_state = devfreq->profile->max_state;
1285 
1286 	if (!devfreq->stop_polling &&
1287 			devfreq_update_status(devfreq, devfreq->previous_freq))
1288 		return 0;
1289 	if (max_state == 0)
1290 		return sprintf(buf, "Not Supported.\n");
1291 
1292 	len = sprintf(buf, "     From  :   To\n");
1293 	len += sprintf(buf + len, "           :");
1294 	for (i = 0; i < max_state; i++)
1295 		len += sprintf(buf + len, "%10lu",
1296 				devfreq->profile->freq_table[i]);
1297 
1298 	len += sprintf(buf + len, "   time(ms)\n");
1299 
1300 	for (i = 0; i < max_state; i++) {
1301 		if (devfreq->profile->freq_table[i]
1302 					== devfreq->previous_freq) {
1303 			len += sprintf(buf + len, "*");
1304 		} else {
1305 			len += sprintf(buf + len, " ");
1306 		}
1307 		len += sprintf(buf + len, "%10lu:",
1308 				devfreq->profile->freq_table[i]);
1309 		for (j = 0; j < max_state; j++)
1310 			len += sprintf(buf + len, "%10u",
1311 				devfreq->trans_table[(i * max_state) + j]);
1312 		len += sprintf(buf + len, "%10u\n",
1313 			jiffies_to_msecs(devfreq->time_in_state[i]));
1314 	}
1315 
1316 	len += sprintf(buf + len, "Total transition : %u\n",
1317 					devfreq->total_trans);
1318 	return len;
1319 }
1320 static DEVICE_ATTR_RO(trans_stat);
1321 
1322 static struct attribute *devfreq_attrs[] = {
1323 	&dev_attr_governor.attr,
1324 	&dev_attr_available_governors.attr,
1325 	&dev_attr_cur_freq.attr,
1326 	&dev_attr_available_frequencies.attr,
1327 	&dev_attr_target_freq.attr,
1328 	&dev_attr_polling_interval.attr,
1329 	&dev_attr_min_freq.attr,
1330 	&dev_attr_max_freq.attr,
1331 	&dev_attr_trans_stat.attr,
1332 	NULL,
1333 };
1334 ATTRIBUTE_GROUPS(devfreq);
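
/*
 * The attribute group above is exposed under /sys/class/devfreq/<name>/
 * (e.g. /sys/class/devfreq/devfreq0/ with the naming used in
 * devfreq_add_device()), so userspace can, for instance, read
 * available_governors and write "simple_ondemand" to the governor node, or
 * tune polling_interval, min_freq and max_freq from the same directory.
 */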
1335 
1336 static int __init devfreq_init(void)
1337 {
1338 	devfreq_class = class_create(THIS_MODULE, "devfreq");
1339 	if (IS_ERR(devfreq_class)) {
1340 		pr_err("%s: couldn't create class\n", __FILE__);
1341 		return PTR_ERR(devfreq_class);
1342 	}
1343 
1344 	devfreq_wq = create_freezable_workqueue("devfreq_wq");
1345 	if (!devfreq_wq) {
1346 		class_destroy(devfreq_class);
1347 		pr_err("%s: couldn't create workqueue\n", __FILE__);
1348 		return -ENOMEM;
1349 	}
1350 	devfreq_class->dev_groups = devfreq_groups;
1351 
1352 	return 0;
1353 }
1354 subsys_initcall(devfreq_init);
1355 
1356 /*
1357  * The following are helper functions for devfreq user device drivers with
1358  * OPP framework.
1359  */
1360 
1361 /**
1362  * devfreq_recommended_opp() - Helper function to get proper OPP for the
1363  *			     freq value given to target callback.
1364  * @dev:	The devfreq user device. (parent of devfreq)
1365  * @freq:	The frequency given to target function
1366  * @flags:	Flags handed from devfreq framework.
1367  *
1368  * The callers are required to call dev_pm_opp_put() for the returned OPP after
1369  * use.
1370  */
1371 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1372 					   unsigned long *freq,
1373 					   u32 flags)
1374 {
1375 	struct dev_pm_opp *opp;
1376 
1377 	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1378 		/* The freq is an upper bound. opp should be lower */
1379 		opp = dev_pm_opp_find_freq_floor(dev, freq);
1380 
1381 		/* If not available, use the closest opp */
1382 		if (opp == ERR_PTR(-ERANGE))
1383 			opp = dev_pm_opp_find_freq_ceil(dev, freq);
1384 	} else {
1385 		/* The freq is a lower bound. opp should be higher */
1386 		opp = dev_pm_opp_find_freq_ceil(dev, freq);
1387 
1388 		/* If not available, use the closest opp */
1389 		if (opp == ERR_PTR(-ERANGE))
1390 			opp = dev_pm_opp_find_freq_floor(dev, freq);
1391 	}
1392 
1393 	return opp;
1394 }
1395 EXPORT_SYMBOL(devfreq_recommended_opp);
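
/*
 * Illustrative example (hypothetical target() callback; the actual clock and
 * regulator programming is device specific and omitted): drivers usually map
 * the requested frequency to an exact OPP with this helper before
 * reprogramming the hardware.
 *
 *	static int example_target(struct device *dev, unsigned long *freq,
 *				  u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		dev_pm_opp_put(opp);
 *
 *		// *freq now holds the chosen OPP frequency; set the device
 *		// clock (and supply voltage) accordingly here.
 *		return 0;
 *	}
 */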
1396 
1397 /**
1398  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1399  *				   of any changes in OPP
1400  *				   availability
1401  * @dev:	The devfreq user device. (parent of devfreq)
1402  * @devfreq:	The devfreq object.
1403  */
1404 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1405 {
1406 	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
1407 }
1408 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1409 
1410 /**
1411  * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
1412  *				     being notified of any changes in OPP
1413  *				     availability.
1414  * @dev:	The devfreq user device. (parent of devfreq)
1415  * @devfreq:	The devfreq object.
1416  *
1417  * This must be called from the exit() callback of devfreq_dev_profile if
1418  * devfreq_recommended_opp is used.
1419  */
1420 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1421 {
1422 	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
1423 }
1424 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
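
/*
 * Illustrative usage (hypothetical probe/exit pairing): a driver that wants
 * its frequency limits re-evaluated when OPPs are enabled or disabled
 * registers the notifier right after devfreq_add_device() and unregisters it
 * from the devfreq_dev_profile exit() callback, or simply uses the devm
 * variant below.
 *
 *	err = devfreq_register_opp_notifier(dev, devfreq);
 *	if (err)
 *		return err;
 */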
1425 
1426 static void devm_devfreq_opp_release(struct device *dev, void *res)
1427 {
1428 	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
1429 }
1430 
1431 /**
1432  * devm_devfreq_register_opp_notifier()
1433  *		- Resource-managed devfreq_register_opp_notifier()
1434  * @dev:	The devfreq user device. (parent of devfreq)
1435  * @devfreq:	The devfreq object.
1436  */
1437 int devm_devfreq_register_opp_notifier(struct device *dev,
1438 				       struct devfreq *devfreq)
1439 {
1440 	struct devfreq **ptr;
1441 	int ret;
1442 
1443 	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1444 	if (!ptr)
1445 		return -ENOMEM;
1446 
1447 	ret = devfreq_register_opp_notifier(dev, devfreq);
1448 	if (ret) {
1449 		devres_free(ptr);
1450 		return ret;
1451 	}
1452 
1453 	*ptr = devfreq;
1454 	devres_add(dev, ptr);
1455 
1456 	return 0;
1457 }
1458 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1459 
1460 /**
1461  * devm_devfreq_unregister_opp_notifier()
1462  *		- Resource-managed devfreq_unregister_opp_notifier()
1463  * @dev:	The devfreq user device. (parent of devfreq)
1464  * @devfreq:	The devfreq object.
1465  */
1466 void devm_devfreq_unregister_opp_notifier(struct device *dev,
1467 					 struct devfreq *devfreq)
1468 {
1469 	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
1470 			       devm_devfreq_dev_match, devfreq));
1471 }
1472 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1473 
1474 /**
1475  * devfreq_register_notifier() - Register a driver with devfreq
1476  * @devfreq:	The devfreq object.
1477  * @nb:		The notifier block to register.
1478  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1479  */
1480 int devfreq_register_notifier(struct devfreq *devfreq,
1481 				struct notifier_block *nb,
1482 				unsigned int list)
1483 {
1484 	int ret = 0;
1485 
1486 	if (!devfreq)
1487 		return -EINVAL;
1488 
1489 	switch (list) {
1490 	case DEVFREQ_TRANSITION_NOTIFIER:
1491 		ret = srcu_notifier_chain_register(
1492 				&devfreq->transition_notifier_list, nb);
1493 		break;
1494 	default:
1495 		ret = -EINVAL;
1496 	}
1497 
1498 	return ret;
1499 }
1500 EXPORT_SYMBOL(devfreq_register_notifier);
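
/*
 * Illustrative example (hypothetical listener): DEVFREQ_TRANSITION_NOTIFIER
 * callbacks receive a struct devfreq_freqs pointer on DEVFREQ_PRECHANGE and
 * DEVFREQ_POSTCHANGE events, as sent by devfreq_notify_transition() above.
 *
 *	static int example_transition_notifier(struct notifier_block *nb,
 *					       unsigned long event, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("devfreq: %lu -> %lu\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_transition_notifier,
 *	};
 *
 *	// e.g. from the listener's probe(), with devfreq already looked up:
 *	devfreq_register_notifier(devfreq, &example_nb,
 *				  DEVFREQ_TRANSITION_NOTIFIER);
 */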
1501 
1502 /**
1503  * devfreq_unregister_notifier() - Unregister a driver with devfreq
1504  * @devfreq:	The devfreq object.
1505  * @nb:		The notifier block to be unregistered.
1506  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1507  */
1508 int devfreq_unregister_notifier(struct devfreq *devfreq,
1509 				struct notifier_block *nb,
1510 				unsigned int list)
1511 {
1512 	int ret = 0;
1513 
1514 	if (!devfreq)
1515 		return -EINVAL;
1516 
1517 	switch (list) {
1518 	case DEVFREQ_TRANSITION_NOTIFIER:
1519 		ret = srcu_notifier_chain_unregister(
1520 				&devfreq->transition_notifier_list, nb);
1521 		break;
1522 	default:
1523 		ret = -EINVAL;
1524 	}
1525 
1526 	return ret;
1527 }
1528 EXPORT_SYMBOL(devfreq_unregister_notifier);
1529 
1530 struct devfreq_notifier_devres {
1531 	struct devfreq *devfreq;
1532 	struct notifier_block *nb;
1533 	unsigned int list;
1534 };
1535 
1536 static void devm_devfreq_notifier_release(struct device *dev, void *res)
1537 {
1538 	struct devfreq_notifier_devres *this = res;
1539 
1540 	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
1541 }
1542 
1543 /**
1544  * devm_devfreq_register_notifier()
1545  *	- Resource-managed devfreq_register_notifier()
1546  * @dev:	The devfreq user device. (parent of devfreq)
1547  * @devfreq:	The devfreq object.
1548  * @nb:		The notifier block to be registered.
1549  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1550  */
1551 int devm_devfreq_register_notifier(struct device *dev,
1552 				struct devfreq *devfreq,
1553 				struct notifier_block *nb,
1554 				unsigned int list)
1555 {
1556 	struct devfreq_notifier_devres *ptr;
1557 	int ret;
1558 
1559 	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1560 				GFP_KERNEL);
1561 	if (!ptr)
1562 		return -ENOMEM;
1563 
1564 	ret = devfreq_register_notifier(devfreq, nb, list);
1565 	if (ret) {
1566 		devres_free(ptr);
1567 		return ret;
1568 	}
1569 
1570 	ptr->devfreq = devfreq;
1571 	ptr->nb = nb;
1572 	ptr->list = list;
1573 	devres_add(dev, ptr);
1574 
1575 	return 0;
1576 }
1577 EXPORT_SYMBOL(devm_devfreq_register_notifier);
1578 
1579 /**
1580  * devm_devfreq_unregister_notifier()
1581  *	- Resource-managed devfreq_unregister_notifier()
1582  * @dev:	The devfreq user device. (parent of devfreq)
1583  * @devfreq:	The devfreq object.
1584  * @nb:		The notifier block to be unregistered.
1585  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1586  */
1587 void devm_devfreq_unregister_notifier(struct device *dev,
1588 				struct devfreq *devfreq,
1589 				struct notifier_block *nb,
1590 				unsigned int list)
1591 {
1592 	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
1593 			       devm_devfreq_dev_match, devfreq));
1594 }
1595 EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
1596