xref: /openbmc/linux/drivers/devfreq/devfreq.c (revision ff148d8a)
1 /*
2  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
3  *	    for Non-CPU Devices.
4  *
5  * Copyright (C) 2011 Samsung Electronics
6  *	MyungJoo Ham <myungjoo.ham@samsung.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/kmod.h>
15 #include <linux/sched.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/init.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/stat.h>
22 #include <linux/pm_opp.h>
23 #include <linux/devfreq.h>
24 #include <linux/workqueue.h>
25 #include <linux/platform_device.h>
26 #include <linux/list.h>
27 #include <linux/printk.h>
28 #include <linux/hrtimer.h>
29 #include <linux/of.h>
30 #include "governor.h"
31 
32 #define CREATE_TRACE_POINTS
33 #include <trace/events/devfreq.h>
34 
35 static struct class *devfreq_class;
36 
37 /*
38  * devfreq core provides delayed work based load monitoring helper
39  * functions. Governors can use these or can implement their own
40  * monitoring mechanism.
41  */
42 static struct workqueue_struct *devfreq_wq;
43 
44 /* The list of all device-devfreq governors */
45 static LIST_HEAD(devfreq_governor_list);
46 /* The list of all device-devfreq */
47 static LIST_HEAD(devfreq_list);
48 static DEFINE_MUTEX(devfreq_list_lock);
49 
50 /**
51  * find_device_devfreq() - find devfreq struct using device pointer
52  * @dev:	device pointer used to lookup device devfreq.
53  *
54  * Search the list of device devfreqs and return the matched device's
55  * devfreq info. devfreq_list_lock should be held by the caller.
56  */
57 static struct devfreq *find_device_devfreq(struct device *dev)
58 {
59 	struct devfreq *tmp_devfreq;
60 
61 	if (IS_ERR_OR_NULL(dev)) {
62 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
63 		return ERR_PTR(-EINVAL);
64 	}
65 	WARN(!mutex_is_locked(&devfreq_list_lock),
66 	     "devfreq_list_lock must be locked.");
67 
68 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
69 		if (tmp_devfreq->dev.parent == dev)
70 			return tmp_devfreq;
71 	}
72 
73 	return ERR_PTR(-ENODEV);
74 }
75 
76 static unsigned long find_available_min_freq(struct devfreq *devfreq)
77 {
78 	struct dev_pm_opp *opp;
79 	unsigned long min_freq = 0;
80 
81 	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
82 	if (IS_ERR(opp))
83 		min_freq = 0;
84 	else
85 		dev_pm_opp_put(opp);
86 
87 	return min_freq;
88 }
89 
90 static unsigned long find_available_max_freq(struct devfreq *devfreq)
91 {
92 	struct dev_pm_opp *opp;
93 	unsigned long max_freq = ULONG_MAX;
94 
95 	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
96 	if (IS_ERR(opp))
97 		max_freq = 0;
98 	else
99 		dev_pm_opp_put(opp);
100 
101 	return max_freq;
102 }
103 
104 /**
105  * devfreq_get_freq_level() - Lookup freq_table for the frequency
106  * @devfreq:	the devfreq instance
107  * @freq:	the target frequency
108  */
109 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
110 {
111 	int lev;
112 
113 	for (lev = 0; lev < devfreq->profile->max_state; lev++)
114 		if (freq == devfreq->profile->freq_table[lev])
115 			return lev;
116 
117 	return -EINVAL;
118 }
119 
/**
 * set_freq_table() - Build profile->freq_table from the device's OPP table.
 * @devfreq:	the devfreq instance
 *
 * Allocates a devres-managed table (owned by the parent device) with one
 * entry per OPP, filled in ascending frequency order, and sets
 * profile->max_state accordingly.
 *
 * Return: 0 on success, -EINVAL if there are no OPPs, -ENOMEM on allocation
 * failure, or the error from dev_pm_opp_find_freq_ceil().
 */
static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		/* Keep max_state consistent with the (absent) table. */
		profile->max_state = 0;
		return -ENOMEM;
	}

	/*
	 * find_freq_ceil(&freq) returns the lowest OPP >= freq and writes
	 * that OPP's rate back into freq; the freq++ in the loop header then
	 * makes the next lookup strictly higher, so the table ascends.
	 */
	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}
155 
156 /**
157  * devfreq_update_status() - Update statistics of devfreq behavior
158  * @devfreq:	the devfreq instance
159  * @freq:	the update target frequency
160  */
161 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
162 {
163 	int lev, prev_lev, ret = 0;
164 	unsigned long cur_time;
165 
166 	cur_time = jiffies;
167 
168 	/* Immediately exit if previous_freq is not initialized yet. */
169 	if (!devfreq->previous_freq)
170 		goto out;
171 
172 	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
173 	if (prev_lev < 0) {
174 		ret = prev_lev;
175 		goto out;
176 	}
177 
178 	devfreq->time_in_state[prev_lev] +=
179 			 cur_time - devfreq->last_stat_updated;
180 
181 	lev = devfreq_get_freq_level(devfreq, freq);
182 	if (lev < 0) {
183 		ret = lev;
184 		goto out;
185 	}
186 
187 	if (lev != prev_lev) {
188 		devfreq->trans_table[(prev_lev *
189 				devfreq->profile->max_state) + lev]++;
190 		devfreq->total_trans++;
191 	}
192 
193 out:
194 	devfreq->last_stat_updated = cur_time;
195 	return ret;
196 }
197 EXPORT_SYMBOL(devfreq_update_status);
198 
199 /**
200  * find_devfreq_governor() - find devfreq governor from name
201  * @name:	name of the governor
202  *
203  * Search the list of devfreq governors and return the matched
204  * governor's pointer. devfreq_list_lock should be held by the caller.
205  */
206 static struct devfreq_governor *find_devfreq_governor(const char *name)
207 {
208 	struct devfreq_governor *tmp_governor;
209 
210 	if (IS_ERR_OR_NULL(name)) {
211 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
212 		return ERR_PTR(-EINVAL);
213 	}
214 	WARN(!mutex_is_locked(&devfreq_list_lock),
215 	     "devfreq_list_lock must be locked.");
216 
217 	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
218 		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
219 			return tmp_governor;
220 	}
221 
222 	return ERR_PTR(-ENODEV);
223 }
224 
225 /**
226  * try_then_request_governor() - Try to find the governor and request the
227  *                               module if is not found.
228  * @name:	name of the governor
229  *
230  * Search the list of devfreq governors and request the module and try again
231  * if is not found. This can happen when both drivers (the governor driver
232  * and the driver that call devfreq_add_device) are built as modules.
233  * devfreq_list_lock should be held by the caller. Returns the matched
234  * governor's pointer or an error pointer.
235  */
236 static struct devfreq_governor *try_then_request_governor(const char *name)
237 {
238 	struct devfreq_governor *governor;
239 	int err = 0;
240 
241 	if (IS_ERR_OR_NULL(name)) {
242 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
243 		return ERR_PTR(-EINVAL);
244 	}
245 	WARN(!mutex_is_locked(&devfreq_list_lock),
246 	     "devfreq_list_lock must be locked.");
247 
248 	governor = find_devfreq_governor(name);
249 	if (IS_ERR(governor)) {
250 		mutex_unlock(&devfreq_list_lock);
251 
252 		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
253 			     DEVFREQ_NAME_LEN))
254 			err = request_module("governor_%s", "simpleondemand");
255 		else
256 			err = request_module("governor_%s", name);
257 		/* Restore previous state before return */
258 		mutex_lock(&devfreq_list_lock);
259 		if (err)
260 			return ERR_PTR(err);
261 
262 		governor = find_devfreq_governor(name);
263 	}
264 
265 	return governor;
266 }
267 
268 static int devfreq_notify_transition(struct devfreq *devfreq,
269 		struct devfreq_freqs *freqs, unsigned int state)
270 {
271 	if (!devfreq)
272 		return -EINVAL;
273 
274 	switch (state) {
275 	case DEVFREQ_PRECHANGE:
276 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
277 				DEVFREQ_PRECHANGE, freqs);
278 		break;
279 
280 	case DEVFREQ_POSTCHANGE:
281 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
282 				DEVFREQ_POSTCHANGE, freqs);
283 		break;
284 	default:
285 		return -EINVAL;
286 	}
287 
288 	return 0;
289 }
290 
/*
 * devfreq_set_target() - Ask the driver to switch to @new_freq and keep
 * the surrounding bookkeeping in sync: PRE/POSTCHANGE notifiers, the
 * transition statistics, previous_freq, and the resume frequency.
 * Returns 0 or the error from the driver's ->target() callback.
 */
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	/* Prefer the driver's idea of the current frequency if available. */
	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		/*
		 * Every PRECHANGE must be paired with a POSTCHANGE; report
		 * the unchanged frequency so listeners stay consistent.
		 */
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	/* ->target() may have adjusted new_freq to a supported value. */
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	/* Remember where to return to when resuming from the suspend OPP. */
	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
328 
329 /* Load monitoring helper functions for governors use */
330 
331 /**
332  * update_devfreq() - Reevaluate the device and configure frequency.
333  * @devfreq:	the devfreq instance.
334  *
335  * Note: Lock devfreq->lock before calling update_devfreq
336  *	 This function is exported for governors.
337  */
338 int update_devfreq(struct devfreq *devfreq)
339 {
340 	unsigned long freq, min_freq, max_freq;
341 	int err = 0;
342 	u32 flags = 0;
343 
344 	if (!mutex_is_locked(&devfreq->lock)) {
345 		WARN(true, "devfreq->lock must be locked by the caller.\n");
346 		return -EINVAL;
347 	}
348 
349 	if (!devfreq->governor)
350 		return -EINVAL;
351 
352 	/* Reevaluate the proper frequency */
353 	err = devfreq->governor->get_target_freq(devfreq, &freq);
354 	if (err)
355 		return err;
356 
357 	/*
358 	 * Adjust the frequency with user freq, QoS and available freq.
359 	 *
360 	 * List from the highest priority
361 	 * max_freq
362 	 * min_freq
363 	 */
364 	max_freq = min(devfreq->scaling_max_freq, devfreq->max_freq);
365 	min_freq = max(devfreq->scaling_min_freq, devfreq->min_freq);
366 
367 	if (freq < min_freq) {
368 		freq = min_freq;
369 		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
370 	}
371 	if (freq > max_freq) {
372 		freq = max_freq;
373 		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
374 	}
375 
376 	return devfreq_set_target(devfreq, freq, flags);
377 
378 }
379 EXPORT_SYMBOL(update_devfreq);
380 
381 /**
382  * devfreq_monitor() - Periodically poll devfreq objects.
383  * @work:	the work struct used to run devfreq_monitor periodically.
384  *
385  */
386 static void devfreq_monitor(struct work_struct *work)
387 {
388 	int err;
389 	struct devfreq *devfreq = container_of(work,
390 					struct devfreq, work.work);
391 
392 	mutex_lock(&devfreq->lock);
393 	err = update_devfreq(devfreq);
394 	if (err)
395 		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
396 
397 	queue_delayed_work(devfreq_wq, &devfreq->work,
398 				msecs_to_jiffies(devfreq->profile->polling_ms));
399 	mutex_unlock(&devfreq->lock);
400 
401 	trace_devfreq_monitor(devfreq);
402 }
403 
404 /**
405  * devfreq_monitor_start() - Start load monitoring of devfreq instance
406  * @devfreq:	the devfreq instance.
407  *
408  * Helper function for starting devfreq device load monitoing. By
409  * default delayed work based monitoring is supported. Function
410  * to be called from governor in response to DEVFREQ_GOV_START
411  * event when device is added to devfreq framework.
412  */
413 void devfreq_monitor_start(struct devfreq *devfreq)
414 {
415 	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
416 	if (devfreq->profile->polling_ms)
417 		queue_delayed_work(devfreq_wq, &devfreq->work,
418 			msecs_to_jiffies(devfreq->profile->polling_ms));
419 }
420 EXPORT_SYMBOL(devfreq_monitor_start);
421 
422 /**
423  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
424  * @devfreq:	the devfreq instance.
425  *
426  * Helper function to stop devfreq device load monitoing. Function
427  * to be called from governor in response to DEVFREQ_GOV_STOP
428  * event when device is removed from devfreq framework.
429  */
430 void devfreq_monitor_stop(struct devfreq *devfreq)
431 {
432 	cancel_delayed_work_sync(&devfreq->work);
433 }
434 EXPORT_SYMBOL(devfreq_monitor_stop);
435 
436 /**
437  * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
438  * @devfreq:	the devfreq instance.
439  *
440  * Helper function to suspend devfreq device load monitoing. Function
441  * to be called from governor in response to DEVFREQ_GOV_SUSPEND
442  * event or when polling interval is set to zero.
443  *
444  * Note: Though this function is same as devfreq_monitor_stop(),
445  * intentionally kept separate to provide hooks for collecting
446  * transition statistics.
447  */
448 void devfreq_monitor_suspend(struct devfreq *devfreq)
449 {
450 	mutex_lock(&devfreq->lock);
451 	if (devfreq->stop_polling) {
452 		mutex_unlock(&devfreq->lock);
453 		return;
454 	}
455 
456 	devfreq_update_status(devfreq, devfreq->previous_freq);
457 	devfreq->stop_polling = true;
458 	mutex_unlock(&devfreq->lock);
459 	cancel_delayed_work_sync(&devfreq->work);
460 }
461 EXPORT_SYMBOL(devfreq_monitor_suspend);
462 
463 /**
464  * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
465  * @devfreq:    the devfreq instance.
466  *
467  * Helper function to resume devfreq device load monitoing. Function
468  * to be called from governor in response to DEVFREQ_GOV_RESUME
469  * event or when polling interval is set to non-zero.
470  */
471 void devfreq_monitor_resume(struct devfreq *devfreq)
472 {
473 	unsigned long freq;
474 
475 	mutex_lock(&devfreq->lock);
476 	if (!devfreq->stop_polling)
477 		goto out;
478 
479 	if (!delayed_work_pending(&devfreq->work) &&
480 			devfreq->profile->polling_ms)
481 		queue_delayed_work(devfreq_wq, &devfreq->work,
482 			msecs_to_jiffies(devfreq->profile->polling_ms));
483 
484 	devfreq->last_stat_updated = jiffies;
485 	devfreq->stop_polling = false;
486 
487 	if (devfreq->profile->get_cur_freq &&
488 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
489 		devfreq->previous_freq = freq;
490 
491 out:
492 	mutex_unlock(&devfreq->lock);
493 }
494 EXPORT_SYMBOL(devfreq_monitor_resume);
495 
496 /**
497  * devfreq_interval_update() - Update device devfreq monitoring interval
498  * @devfreq:    the devfreq instance.
499  * @delay:      new polling interval to be set.
500  *
501  * Helper function to set new load monitoring polling interval. Function
502  * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
503  */
504 void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
505 {
506 	unsigned int cur_delay = devfreq->profile->polling_ms;
507 	unsigned int new_delay = *delay;
508 
509 	mutex_lock(&devfreq->lock);
510 	devfreq->profile->polling_ms = new_delay;
511 
512 	if (devfreq->stop_polling)
513 		goto out;
514 
515 	/* if new delay is zero, stop polling */
516 	if (!new_delay) {
517 		mutex_unlock(&devfreq->lock);
518 		cancel_delayed_work_sync(&devfreq->work);
519 		return;
520 	}
521 
522 	/* if current delay is zero, start polling with new delay */
523 	if (!cur_delay) {
524 		queue_delayed_work(devfreq_wq, &devfreq->work,
525 			msecs_to_jiffies(devfreq->profile->polling_ms));
526 		goto out;
527 	}
528 
529 	/* if current delay is greater than new delay, restart polling */
530 	if (cur_delay > new_delay) {
531 		mutex_unlock(&devfreq->lock);
532 		cancel_delayed_work_sync(&devfreq->work);
533 		mutex_lock(&devfreq->lock);
534 		if (!devfreq->stop_polling)
535 			queue_delayed_work(devfreq_wq, &devfreq->work,
536 				msecs_to_jiffies(devfreq->profile->polling_ms));
537 	}
538 out:
539 	mutex_unlock(&devfreq->lock);
540 }
541 EXPORT_SYMBOL(devfreq_interval_update);
542 
543 /**
544  * devfreq_notifier_call() - Notify that the device frequency requirements
545  *			     has been changed out of devfreq framework.
546  * @nb:		the notifier_block (supposed to be devfreq->nb)
547  * @type:	not used
548  * @devp:	not used
549  *
550  * Called by a notifier that uses devfreq->nb.
551  */
552 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
553 				 void *devp)
554 {
555 	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
556 	int ret;
557 
558 	mutex_lock(&devfreq->lock);
559 
560 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
561 	if (!devfreq->scaling_min_freq) {
562 		mutex_unlock(&devfreq->lock);
563 		return -EINVAL;
564 	}
565 
566 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
567 	if (!devfreq->scaling_max_freq) {
568 		mutex_unlock(&devfreq->lock);
569 		return -EINVAL;
570 	}
571 
572 	ret = update_devfreq(devfreq);
573 	mutex_unlock(&devfreq->lock);
574 
575 	return ret;
576 }
577 
578 /**
579  * devfreq_dev_release() - Callback for struct device to release the device.
580  * @dev:	the devfreq device
581  *
582  * Remove devfreq from the list and release its resources.
583  */
584 static void devfreq_dev_release(struct device *dev)
585 {
586 	struct devfreq *devfreq = to_devfreq(dev);
587 
588 	mutex_lock(&devfreq_list_lock);
589 	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
590 		mutex_unlock(&devfreq_list_lock);
591 		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
592 		return;
593 	}
594 	list_del(&devfreq->node);
595 	mutex_unlock(&devfreq_list_lock);
596 
597 	if (devfreq->profile->exit)
598 		devfreq->profile->exit(devfreq->dev.parent);
599 
600 	mutex_destroy(&devfreq->lock);
601 	kfree(devfreq);
602 }
603 
604 /**
605  * devfreq_add_device() - Add devfreq feature to the device
606  * @dev:	the device to add devfreq feature.
607  * @profile:	device-specific profile to run devfreq.
608  * @governor_name:	name of the policy to choose frequency.
609  * @data:	private data for the governor. The devfreq framework does not
610  *		touch this value.
611  */
612 struct devfreq *devfreq_add_device(struct device *dev,
613 				   struct devfreq_dev_profile *profile,
614 				   const char *governor_name,
615 				   void *data)
616 {
617 	struct devfreq *devfreq;
618 	struct devfreq_governor *governor;
619 	static atomic_t devfreq_no = ATOMIC_INIT(-1);
620 	int err = 0;
621 
622 	if (!dev || !profile || !governor_name) {
623 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
624 		return ERR_PTR(-EINVAL);
625 	}
626 
627 	mutex_lock(&devfreq_list_lock);
628 	devfreq = find_device_devfreq(dev);
629 	mutex_unlock(&devfreq_list_lock);
630 	if (!IS_ERR(devfreq)) {
631 		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
632 			__func__);
633 		err = -EINVAL;
634 		goto err_out;
635 	}
636 
637 	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
638 	if (!devfreq) {
639 		err = -ENOMEM;
640 		goto err_out;
641 	}
642 
643 	mutex_init(&devfreq->lock);
644 	mutex_lock(&devfreq->lock);
645 	devfreq->dev.parent = dev;
646 	devfreq->dev.class = devfreq_class;
647 	devfreq->dev.release = devfreq_dev_release;
648 	devfreq->profile = profile;
649 	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
650 	devfreq->previous_freq = profile->initial_freq;
651 	devfreq->last_status.current_frequency = profile->initial_freq;
652 	devfreq->data = data;
653 	devfreq->nb.notifier_call = devfreq_notifier_call;
654 
655 	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
656 		mutex_unlock(&devfreq->lock);
657 		err = set_freq_table(devfreq);
658 		if (err < 0)
659 			goto err_dev;
660 		mutex_lock(&devfreq->lock);
661 	}
662 
663 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
664 	if (!devfreq->scaling_min_freq) {
665 		mutex_unlock(&devfreq->lock);
666 		err = -EINVAL;
667 		goto err_dev;
668 	}
669 	devfreq->min_freq = devfreq->scaling_min_freq;
670 
671 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
672 	if (!devfreq->scaling_max_freq) {
673 		mutex_unlock(&devfreq->lock);
674 		err = -EINVAL;
675 		goto err_dev;
676 	}
677 	devfreq->max_freq = devfreq->scaling_max_freq;
678 
679 	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
680 	atomic_set(&devfreq->suspend_count, 0);
681 
682 	dev_set_name(&devfreq->dev, "devfreq%d",
683 				atomic_inc_return(&devfreq_no));
684 	err = device_register(&devfreq->dev);
685 	if (err) {
686 		mutex_unlock(&devfreq->lock);
687 		put_device(&devfreq->dev);
688 		goto err_out;
689 	}
690 
691 	devfreq->trans_table = devm_kzalloc(&devfreq->dev,
692 			array3_size(sizeof(unsigned int),
693 				    devfreq->profile->max_state,
694 				    devfreq->profile->max_state),
695 			GFP_KERNEL);
696 	if (!devfreq->trans_table) {
697 		mutex_unlock(&devfreq->lock);
698 		err = -ENOMEM;
699 		goto err_devfreq;
700 	}
701 
702 	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
703 			devfreq->profile->max_state,
704 			sizeof(unsigned long),
705 			GFP_KERNEL);
706 	if (!devfreq->time_in_state) {
707 		mutex_unlock(&devfreq->lock);
708 		err = -ENOMEM;
709 		goto err_devfreq;
710 	}
711 
712 	devfreq->last_stat_updated = jiffies;
713 
714 	srcu_init_notifier_head(&devfreq->transition_notifier_list);
715 
716 	mutex_unlock(&devfreq->lock);
717 
718 	mutex_lock(&devfreq_list_lock);
719 
720 	governor = try_then_request_governor(devfreq->governor_name);
721 	if (IS_ERR(governor)) {
722 		dev_err(dev, "%s: Unable to find governor for the device\n",
723 			__func__);
724 		err = PTR_ERR(governor);
725 		goto err_init;
726 	}
727 
728 	devfreq->governor = governor;
729 	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
730 						NULL);
731 	if (err) {
732 		dev_err(dev, "%s: Unable to start governor for the device\n",
733 			__func__);
734 		goto err_init;
735 	}
736 
737 	list_add(&devfreq->node, &devfreq_list);
738 
739 	mutex_unlock(&devfreq_list_lock);
740 
741 	return devfreq;
742 
743 err_init:
744 	mutex_unlock(&devfreq_list_lock);
745 err_devfreq:
746 	devfreq_remove_device(devfreq);
747 	devfreq = NULL;
748 err_dev:
749 	kfree(devfreq);
750 err_out:
751 	return ERR_PTR(err);
752 }
753 EXPORT_SYMBOL(devfreq_add_device);
754 
755 /**
756  * devfreq_remove_device() - Remove devfreq feature from a device.
757  * @devfreq:	the devfreq instance to be removed
758  *
759  * The opposite of devfreq_add_device().
760  */
761 int devfreq_remove_device(struct devfreq *devfreq)
762 {
763 	if (!devfreq)
764 		return -EINVAL;
765 
766 	if (devfreq->governor)
767 		devfreq->governor->event_handler(devfreq,
768 						 DEVFREQ_GOV_STOP, NULL);
769 	device_unregister(&devfreq->dev);
770 
771 	return 0;
772 }
773 EXPORT_SYMBOL(devfreq_remove_device);
774 
/* devres match callback: true when the stored instance equals @data. */
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **this = res;

	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
784 
/* devres release callback: tears down the managed devfreq instance. */
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}
789 
790 /**
791  * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
792  * @dev:	the device to add devfreq feature.
793  * @profile:	device-specific profile to run devfreq.
794  * @governor_name:	name of the policy to choose frequency.
795  * @data:	private data for the governor. The devfreq framework does not
796  *		touch this value.
797  *
798  * This function manages automatically the memory of devfreq device using device
799  * resource management and simplify the free operation for memory of devfreq
800  * device.
801  */
802 struct devfreq *devm_devfreq_add_device(struct device *dev,
803 					struct devfreq_dev_profile *profile,
804 					const char *governor_name,
805 					void *data)
806 {
807 	struct devfreq **ptr, *devfreq;
808 
809 	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
810 	if (!ptr)
811 		return ERR_PTR(-ENOMEM);
812 
813 	devfreq = devfreq_add_device(dev, profile, governor_name, data);
814 	if (IS_ERR(devfreq)) {
815 		devres_free(ptr);
816 		return devfreq;
817 	}
818 
819 	*ptr = devfreq;
820 	devres_add(dev, ptr);
821 
822 	return devfreq;
823 }
824 EXPORT_SYMBOL(devm_devfreq_add_device);
825 
#ifdef CONFIG_OF
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev - instance to the given device
 * @index - index into list of devfreq
 *
 * Resolves the @index-th phandle of @dev's "devfreq" DT property and
 * looks up the devfreq instance whose parent matches that node.
 *
 * return the instance of devfreq device; -EPROBE_DEFER when the node
 * exists but no devfreq instance is registered for it yet.
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	/* Node exists but its devfreq is not registered yet: defer probe. */
	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
870 
871 /**
872  * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
873  * @dev:	the device to add devfreq feature.
874  * @devfreq:	the devfreq instance to be removed
875  */
876 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
877 {
878 	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
879 			       devm_devfreq_dev_match, devfreq));
880 }
881 EXPORT_SYMBOL(devm_devfreq_remove_device);
882 
883 /**
884  * devfreq_suspend_device() - Suspend devfreq of a device.
885  * @devfreq: the devfreq instance to be suspended
886  *
887  * This function is intended to be called by the pm callbacks
888  * (e.g., runtime_suspend, suspend) of the device driver that
889  * holds the devfreq.
890  */
891 int devfreq_suspend_device(struct devfreq *devfreq)
892 {
893 	int ret;
894 
895 	if (!devfreq)
896 		return -EINVAL;
897 
898 	if (atomic_inc_return(&devfreq->suspend_count) > 1)
899 		return 0;
900 
901 	if (devfreq->governor) {
902 		ret = devfreq->governor->event_handler(devfreq,
903 					DEVFREQ_GOV_SUSPEND, NULL);
904 		if (ret)
905 			return ret;
906 	}
907 
908 	if (devfreq->suspend_freq) {
909 		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
910 		if (ret)
911 			return ret;
912 	}
913 
914 	return 0;
915 }
916 EXPORT_SYMBOL(devfreq_suspend_device);
917 
918 /**
919  * devfreq_resume_device() - Resume devfreq of a device.
920  * @devfreq: the devfreq instance to be resumed
921  *
922  * This function is intended to be called by the pm callbacks
923  * (e.g., runtime_resume, resume) of the device driver that
924  * holds the devfreq.
925  */
926 int devfreq_resume_device(struct devfreq *devfreq)
927 {
928 	int ret;
929 
930 	if (!devfreq)
931 		return -EINVAL;
932 
933 	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
934 		return 0;
935 
936 	if (devfreq->resume_freq) {
937 		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
938 		if (ret)
939 			return ret;
940 	}
941 
942 	if (devfreq->governor) {
943 		ret = devfreq->governor->event_handler(devfreq,
944 					DEVFREQ_GOV_RESUME, NULL);
945 		if (ret)
946 			return ret;
947 	}
948 
949 	return 0;
950 }
951 EXPORT_SYMBOL(devfreq_resume_device);
952 
953 /**
954  * devfreq_suspend() - Suspend devfreq governors and devices
955  *
956  * Called during system wide Suspend/Hibernate cycles for suspending governors
957  * and devices preserving the state for resume. On some platforms the devfreq
958  * device must have precise state (frequency) after resume in order to provide
959  * fully operating setup.
960  */
961 void devfreq_suspend(void)
962 {
963 	struct devfreq *devfreq;
964 	int ret;
965 
966 	mutex_lock(&devfreq_list_lock);
967 	list_for_each_entry(devfreq, &devfreq_list, node) {
968 		ret = devfreq_suspend_device(devfreq);
969 		if (ret)
970 			dev_err(&devfreq->dev,
971 				"failed to suspend devfreq device\n");
972 	}
973 	mutex_unlock(&devfreq_list_lock);
974 }
975 
976 /**
977  * devfreq_resume() - Resume devfreq governors and devices
978  *
979  * Called during system wide Suspend/Hibernate cycle for resuming governors and
980  * devices that are suspended with devfreq_suspend().
981  */
982 void devfreq_resume(void)
983 {
984 	struct devfreq *devfreq;
985 	int ret;
986 
987 	mutex_lock(&devfreq_list_lock);
988 	list_for_each_entry(devfreq, &devfreq_list, node) {
989 		ret = devfreq_resume_device(devfreq);
990 		if (ret)
991 			dev_warn(&devfreq->dev,
992 				 "failed to resume devfreq device\n");
993 	}
994 	mutex_unlock(&devfreq_list_lock);
995 }
996 
997 /**
998  * devfreq_add_governor() - Add devfreq governor
999  * @governor:	the devfreq governor to be added
1000  */
1001 int devfreq_add_governor(struct devfreq_governor *governor)
1002 {
1003 	struct devfreq_governor *g;
1004 	struct devfreq *devfreq;
1005 	int err = 0;
1006 
1007 	if (!governor) {
1008 		pr_err("%s: Invalid parameters.\n", __func__);
1009 		return -EINVAL;
1010 	}
1011 
1012 	mutex_lock(&devfreq_list_lock);
1013 	g = find_devfreq_governor(governor->name);
1014 	if (!IS_ERR(g)) {
1015 		pr_err("%s: governor %s already registered\n", __func__,
1016 		       g->name);
1017 		err = -EINVAL;
1018 		goto err_out;
1019 	}
1020 
1021 	list_add(&governor->node, &devfreq_governor_list);
1022 
1023 	list_for_each_entry(devfreq, &devfreq_list, node) {
1024 		int ret = 0;
1025 		struct device *dev = devfreq->dev.parent;
1026 
1027 		if (!strncmp(devfreq->governor_name, governor->name,
1028 			     DEVFREQ_NAME_LEN)) {
1029 			/* The following should never occur */
1030 			if (devfreq->governor) {
1031 				dev_warn(dev,
1032 					 "%s: Governor %s already present\n",
1033 					 __func__, devfreq->governor->name);
1034 				ret = devfreq->governor->event_handler(devfreq,
1035 							DEVFREQ_GOV_STOP, NULL);
1036 				if (ret) {
1037 					dev_warn(dev,
1038 						 "%s: Governor %s stop = %d\n",
1039 						 __func__,
1040 						 devfreq->governor->name, ret);
1041 				}
1042 				/* Fall through */
1043 			}
1044 			devfreq->governor = governor;
1045 			ret = devfreq->governor->event_handler(devfreq,
1046 						DEVFREQ_GOV_START, NULL);
1047 			if (ret) {
1048 				dev_warn(dev, "%s: Governor %s start=%d\n",
1049 					 __func__, devfreq->governor->name,
1050 					 ret);
1051 			}
1052 		}
1053 	}
1054 
1055 err_out:
1056 	mutex_unlock(&devfreq_list_lock);
1057 
1058 	return err;
1059 }
1060 EXPORT_SYMBOL(devfreq_add_governor);
1061 
1062 /**
1063  * devfreq_remove_governor() - Remove devfreq feature from a device.
1064  * @governor:	the devfreq governor to be removed
1065  */
1066 int devfreq_remove_governor(struct devfreq_governor *governor)
1067 {
1068 	struct devfreq_governor *g;
1069 	struct devfreq *devfreq;
1070 	int err = 0;
1071 
1072 	if (!governor) {
1073 		pr_err("%s: Invalid parameters.\n", __func__);
1074 		return -EINVAL;
1075 	}
1076 
1077 	mutex_lock(&devfreq_list_lock);
1078 	g = find_devfreq_governor(governor->name);
1079 	if (IS_ERR(g)) {
1080 		pr_err("%s: governor %s not registered\n", __func__,
1081 		       governor->name);
1082 		err = PTR_ERR(g);
1083 		goto err_out;
1084 	}
1085 	list_for_each_entry(devfreq, &devfreq_list, node) {
1086 		int ret;
1087 		struct device *dev = devfreq->dev.parent;
1088 
1089 		if (!strncmp(devfreq->governor_name, governor->name,
1090 			     DEVFREQ_NAME_LEN)) {
1091 			/* we should have a devfreq governor! */
1092 			if (!devfreq->governor) {
1093 				dev_warn(dev, "%s: Governor %s NOT present\n",
1094 					 __func__, governor->name);
1095 				continue;
1096 				/* Fall through */
1097 			}
1098 			ret = devfreq->governor->event_handler(devfreq,
1099 						DEVFREQ_GOV_STOP, NULL);
1100 			if (ret) {
1101 				dev_warn(dev, "%s: Governor %s stop=%d\n",
1102 					 __func__, devfreq->governor->name,
1103 					 ret);
1104 			}
1105 			devfreq->governor = NULL;
1106 		}
1107 	}
1108 
1109 	list_del(&governor->node);
1110 err_out:
1111 	mutex_unlock(&devfreq_list_lock);
1112 
1113 	return err;
1114 }
1115 EXPORT_SYMBOL(devfreq_remove_governor);
1116 
1117 static ssize_t governor_show(struct device *dev,
1118 			     struct device_attribute *attr, char *buf)
1119 {
1120 	if (!to_devfreq(dev)->governor)
1121 		return -EINVAL;
1122 
1123 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1124 }
1125 
1126 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1127 			      const char *buf, size_t count)
1128 {
1129 	struct devfreq *df = to_devfreq(dev);
1130 	int ret;
1131 	char str_governor[DEVFREQ_NAME_LEN + 1];
1132 	const struct devfreq_governor *governor, *prev_governor;
1133 
1134 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1135 	if (ret != 1)
1136 		return -EINVAL;
1137 
1138 	mutex_lock(&devfreq_list_lock);
1139 	governor = try_then_request_governor(str_governor);
1140 	if (IS_ERR(governor)) {
1141 		ret = PTR_ERR(governor);
1142 		goto out;
1143 	}
1144 	if (df->governor == governor) {
1145 		ret = 0;
1146 		goto out;
1147 	} else if ((df->governor && df->governor->immutable) ||
1148 					governor->immutable) {
1149 		ret = -EINVAL;
1150 		goto out;
1151 	}
1152 
1153 	if (df->governor) {
1154 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1155 		if (ret) {
1156 			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1157 				 __func__, df->governor->name, ret);
1158 			goto out;
1159 		}
1160 	}
1161 	prev_governor = df->governor;
1162 	df->governor = governor;
1163 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1164 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1165 	if (ret) {
1166 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
1167 			 __func__, df->governor->name, ret);
1168 		df->governor = prev_governor;
1169 		strncpy(df->governor_name, prev_governor->name,
1170 			DEVFREQ_NAME_LEN);
1171 		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1172 		if (ret) {
1173 			dev_err(dev,
1174 				"%s: reverting to Governor %s failed (%d)\n",
1175 				__func__, df->governor_name, ret);
1176 			df->governor = NULL;
1177 		}
1178 	}
1179 out:
1180 	mutex_unlock(&devfreq_list_lock);
1181 
1182 	if (!ret)
1183 		ret = count;
1184 	return ret;
1185 }
1186 static DEVICE_ATTR_RW(governor);
1187 
1188 static ssize_t available_governors_show(struct device *d,
1189 					struct device_attribute *attr,
1190 					char *buf)
1191 {
1192 	struct devfreq *df = to_devfreq(d);
1193 	ssize_t count = 0;
1194 
1195 	mutex_lock(&devfreq_list_lock);
1196 
1197 	/*
1198 	 * The devfreq with immutable governor (e.g., passive) shows
1199 	 * only own governor.
1200 	 */
1201 	if (df->governor->immutable) {
1202 		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1203 				  "%s ", df->governor_name);
1204 	/*
1205 	 * The devfreq device shows the registered governor except for
1206 	 * immutable governors such as passive governor .
1207 	 */
1208 	} else {
1209 		struct devfreq_governor *governor;
1210 
1211 		list_for_each_entry(governor, &devfreq_governor_list, node) {
1212 			if (governor->immutable)
1213 				continue;
1214 			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1215 					   "%s ", governor->name);
1216 		}
1217 	}
1218 
1219 	mutex_unlock(&devfreq_list_lock);
1220 
1221 	/* Truncate the trailing space */
1222 	if (count)
1223 		count--;
1224 
1225 	count += sprintf(&buf[count], "\n");
1226 
1227 	return count;
1228 }
1229 static DEVICE_ATTR_RO(available_governors);
1230 
1231 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1232 			     char *buf)
1233 {
1234 	unsigned long freq;
1235 	struct devfreq *devfreq = to_devfreq(dev);
1236 
1237 	if (devfreq->profile->get_cur_freq &&
1238 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1239 		return sprintf(buf, "%lu\n", freq);
1240 
1241 	return sprintf(buf, "%lu\n", devfreq->previous_freq);
1242 }
1243 static DEVICE_ATTR_RO(cur_freq);
1244 
1245 static ssize_t target_freq_show(struct device *dev,
1246 				struct device_attribute *attr, char *buf)
1247 {
1248 	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1249 }
1250 static DEVICE_ATTR_RO(target_freq);
1251 
1252 static ssize_t polling_interval_show(struct device *dev,
1253 				     struct device_attribute *attr, char *buf)
1254 {
1255 	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1256 }
1257 
1258 static ssize_t polling_interval_store(struct device *dev,
1259 				      struct device_attribute *attr,
1260 				      const char *buf, size_t count)
1261 {
1262 	struct devfreq *df = to_devfreq(dev);
1263 	unsigned int value;
1264 	int ret;
1265 
1266 	if (!df->governor)
1267 		return -EINVAL;
1268 
1269 	ret = sscanf(buf, "%u", &value);
1270 	if (ret != 1)
1271 		return -EINVAL;
1272 
1273 	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1274 	ret = count;
1275 
1276 	return ret;
1277 }
1278 static DEVICE_ATTR_RW(polling_interval);
1279 
1280 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1281 			      const char *buf, size_t count)
1282 {
1283 	struct devfreq *df = to_devfreq(dev);
1284 	unsigned long value;
1285 	int ret;
1286 
1287 	ret = sscanf(buf, "%lu", &value);
1288 	if (ret != 1)
1289 		return -EINVAL;
1290 
1291 	mutex_lock(&df->lock);
1292 
1293 	if (value) {
1294 		if (value > df->max_freq) {
1295 			ret = -EINVAL;
1296 			goto unlock;
1297 		}
1298 	} else {
1299 		unsigned long *freq_table = df->profile->freq_table;
1300 
1301 		/* Get minimum frequency according to sorting order */
1302 		if (freq_table[0] < freq_table[df->profile->max_state - 1])
1303 			value = freq_table[0];
1304 		else
1305 			value = freq_table[df->profile->max_state - 1];
1306 	}
1307 
1308 	df->min_freq = value;
1309 	update_devfreq(df);
1310 	ret = count;
1311 unlock:
1312 	mutex_unlock(&df->lock);
1313 	return ret;
1314 }
1315 
1316 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1317 			     char *buf)
1318 {
1319 	struct devfreq *df = to_devfreq(dev);
1320 
1321 	return sprintf(buf, "%lu\n", max(df->scaling_min_freq, df->min_freq));
1322 }
1323 
1324 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1325 			      const char *buf, size_t count)
1326 {
1327 	struct devfreq *df = to_devfreq(dev);
1328 	unsigned long value;
1329 	int ret;
1330 
1331 	ret = sscanf(buf, "%lu", &value);
1332 	if (ret != 1)
1333 		return -EINVAL;
1334 
1335 	mutex_lock(&df->lock);
1336 
1337 	if (value) {
1338 		if (value < df->min_freq) {
1339 			ret = -EINVAL;
1340 			goto unlock;
1341 		}
1342 	} else {
1343 		unsigned long *freq_table = df->profile->freq_table;
1344 
1345 		/* Get maximum frequency according to sorting order */
1346 		if (freq_table[0] < freq_table[df->profile->max_state - 1])
1347 			value = freq_table[df->profile->max_state - 1];
1348 		else
1349 			value = freq_table[0];
1350 	}
1351 
1352 	df->max_freq = value;
1353 	update_devfreq(df);
1354 	ret = count;
1355 unlock:
1356 	mutex_unlock(&df->lock);
1357 	return ret;
1358 }
1359 static DEVICE_ATTR_RW(min_freq);
1360 
1361 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1362 			     char *buf)
1363 {
1364 	struct devfreq *df = to_devfreq(dev);
1365 
1366 	return sprintf(buf, "%lu\n", min(df->scaling_max_freq, df->max_freq));
1367 }
1368 static DEVICE_ATTR_RW(max_freq);
1369 
1370 static ssize_t available_frequencies_show(struct device *d,
1371 					  struct device_attribute *attr,
1372 					  char *buf)
1373 {
1374 	struct devfreq *df = to_devfreq(d);
1375 	ssize_t count = 0;
1376 	int i;
1377 
1378 	mutex_lock(&df->lock);
1379 
1380 	for (i = 0; i < df->profile->max_state; i++)
1381 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1382 				"%lu ", df->profile->freq_table[i]);
1383 
1384 	mutex_unlock(&df->lock);
1385 	/* Truncate the trailing space */
1386 	if (count)
1387 		count--;
1388 
1389 	count += sprintf(&buf[count], "\n");
1390 
1391 	return count;
1392 }
1393 static DEVICE_ATTR_RO(available_frequencies);
1394 
1395 static ssize_t trans_stat_show(struct device *dev,
1396 			       struct device_attribute *attr, char *buf)
1397 {
1398 	struct devfreq *devfreq = to_devfreq(dev);
1399 	ssize_t len;
1400 	int i, j;
1401 	unsigned int max_state = devfreq->profile->max_state;
1402 
1403 	if (!devfreq->stop_polling &&
1404 			devfreq_update_status(devfreq, devfreq->previous_freq))
1405 		return 0;
1406 	if (max_state == 0)
1407 		return sprintf(buf, "Not Supported.\n");
1408 
1409 	len = sprintf(buf, "     From  :   To\n");
1410 	len += sprintf(buf + len, "           :");
1411 	for (i = 0; i < max_state; i++)
1412 		len += sprintf(buf + len, "%10lu",
1413 				devfreq->profile->freq_table[i]);
1414 
1415 	len += sprintf(buf + len, "   time(ms)\n");
1416 
1417 	for (i = 0; i < max_state; i++) {
1418 		if (devfreq->profile->freq_table[i]
1419 					== devfreq->previous_freq) {
1420 			len += sprintf(buf + len, "*");
1421 		} else {
1422 			len += sprintf(buf + len, " ");
1423 		}
1424 		len += sprintf(buf + len, "%10lu:",
1425 				devfreq->profile->freq_table[i]);
1426 		for (j = 0; j < max_state; j++)
1427 			len += sprintf(buf + len, "%10u",
1428 				devfreq->trans_table[(i * max_state) + j]);
1429 		len += sprintf(buf + len, "%10u\n",
1430 			jiffies_to_msecs(devfreq->time_in_state[i]));
1431 	}
1432 
1433 	len += sprintf(buf + len, "Total transition : %u\n",
1434 					devfreq->total_trans);
1435 	return len;
1436 }
1437 static DEVICE_ATTR_RO(trans_stat);
1438 
/* sysfs attributes created for every devfreq device. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
/* Generates devfreq_groups, assigned to the class in devfreq_init(). */
ATTRIBUTE_GROUPS(devfreq);
1452 
1453 static int __init devfreq_init(void)
1454 {
1455 	devfreq_class = class_create(THIS_MODULE, "devfreq");
1456 	if (IS_ERR(devfreq_class)) {
1457 		pr_err("%s: couldn't create class\n", __FILE__);
1458 		return PTR_ERR(devfreq_class);
1459 	}
1460 
1461 	devfreq_wq = create_freezable_workqueue("devfreq_wq");
1462 	if (!devfreq_wq) {
1463 		class_destroy(devfreq_class);
1464 		pr_err("%s: couldn't create workqueue\n", __FILE__);
1465 		return -ENOMEM;
1466 	}
1467 	devfreq_class->dev_groups = devfreq_groups;
1468 
1469 	return 0;
1470 }
1471 subsys_initcall(devfreq_init);
1472 
1473 /*
1474  * The following are helper functions for devfreq user device drivers with
1475  * OPP framework.
1476  */
1477 
1478 /**
1479  * devfreq_recommended_opp() - Helper function to get proper OPP for the
1480  *			     freq value given to target callback.
1481  * @dev:	The devfreq user device. (parent of devfreq)
1482  * @freq:	The frequency given to target function
1483  * @flags:	Flags handed from devfreq framework.
1484  *
1485  * The callers are required to call dev_pm_opp_put() for the returned OPP after
1486  * use.
1487  */
1488 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1489 					   unsigned long *freq,
1490 					   u32 flags)
1491 {
1492 	struct dev_pm_opp *opp;
1493 
1494 	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1495 		/* The freq is an upper bound. opp should be lower */
1496 		opp = dev_pm_opp_find_freq_floor(dev, freq);
1497 
1498 		/* If not available, use the closest opp */
1499 		if (opp == ERR_PTR(-ERANGE))
1500 			opp = dev_pm_opp_find_freq_ceil(dev, freq);
1501 	} else {
1502 		/* The freq is an lower bound. opp should be higher */
1503 		opp = dev_pm_opp_find_freq_ceil(dev, freq);
1504 
1505 		/* If not available, use the closest opp */
1506 		if (opp == ERR_PTR(-ERANGE))
1507 			opp = dev_pm_opp_find_freq_floor(dev, freq);
1508 	}
1509 
1510 	return opp;
1511 }
1512 EXPORT_SYMBOL(devfreq_recommended_opp);
1513 
1514 /**
1515  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1516  *				     for any changes in the OPP availability
1517  *				     changes
1518  * @dev:	The devfreq user device. (parent of devfreq)
1519  * @devfreq:	The devfreq object.
1520  */
1521 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1522 {
1523 	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
1524 }
1525 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1526 
1527 /**
1528  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1529  *				       notified for any changes in the OPP
1530  *				       availability changes anymore.
1531  * @dev:	The devfreq user device. (parent of devfreq)
1532  * @devfreq:	The devfreq object.
1533  *
1534  * At exit() callback of devfreq_dev_profile, this must be included if
1535  * devfreq_recommended_opp is used.
1536  */
1537 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1538 {
1539 	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
1540 }
1541 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
1542 
/* devres release callback: drops the OPP notifier taken by the devm helper. */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	struct devfreq *devfreq = *(struct devfreq **)res;

	devfreq_unregister_opp_notifier(dev, devfreq);
}
1547 
1548 /**
1549  * devm_devfreq_register_opp_notifier() - Resource-managed
1550  *					  devfreq_register_opp_notifier()
1551  * @dev:	The devfreq user device. (parent of devfreq)
1552  * @devfreq:	The devfreq object.
1553  */
1554 int devm_devfreq_register_opp_notifier(struct device *dev,
1555 				       struct devfreq *devfreq)
1556 {
1557 	struct devfreq **ptr;
1558 	int ret;
1559 
1560 	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1561 	if (!ptr)
1562 		return -ENOMEM;
1563 
1564 	ret = devfreq_register_opp_notifier(dev, devfreq);
1565 	if (ret) {
1566 		devres_free(ptr);
1567 		return ret;
1568 	}
1569 
1570 	*ptr = devfreq;
1571 	devres_add(dev, ptr);
1572 
1573 	return 0;
1574 }
1575 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1576 
1577 /**
1578  * devm_devfreq_unregister_opp_notifier() - Resource-managed
1579  *					    devfreq_unregister_opp_notifier()
1580  * @dev:	The devfreq user device. (parent of devfreq)
1581  * @devfreq:	The devfreq object.
1582  */
1583 void devm_devfreq_unregister_opp_notifier(struct device *dev,
1584 					 struct devfreq *devfreq)
1585 {
1586 	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
1587 			       devm_devfreq_dev_match, devfreq));
1588 }
1589 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1590 
1591 /**
1592  * devfreq_register_notifier() - Register a driver with devfreq
1593  * @devfreq:	The devfreq object.
1594  * @nb:		The notifier block to register.
1595  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1596  */
1597 int devfreq_register_notifier(struct devfreq *devfreq,
1598 			      struct notifier_block *nb,
1599 			      unsigned int list)
1600 {
1601 	int ret = 0;
1602 
1603 	if (!devfreq)
1604 		return -EINVAL;
1605 
1606 	switch (list) {
1607 	case DEVFREQ_TRANSITION_NOTIFIER:
1608 		ret = srcu_notifier_chain_register(
1609 				&devfreq->transition_notifier_list, nb);
1610 		break;
1611 	default:
1612 		ret = -EINVAL;
1613 	}
1614 
1615 	return ret;
1616 }
1617 EXPORT_SYMBOL(devfreq_register_notifier);
1618 
1619 /*
1620  * devfreq_unregister_notifier() - Unregister a driver with devfreq
1621  * @devfreq:	The devfreq object.
1622  * @nb:		The notifier block to be unregistered.
1623  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1624  */
1625 int devfreq_unregister_notifier(struct devfreq *devfreq,
1626 				struct notifier_block *nb,
1627 				unsigned int list)
1628 {
1629 	int ret = 0;
1630 
1631 	if (!devfreq)
1632 		return -EINVAL;
1633 
1634 	switch (list) {
1635 	case DEVFREQ_TRANSITION_NOTIFIER:
1636 		ret = srcu_notifier_chain_unregister(
1637 				&devfreq->transition_notifier_list, nb);
1638 		break;
1639 	default:
1640 		ret = -EINVAL;
1641 	}
1642 
1643 	return ret;
1644 }
1645 EXPORT_SYMBOL(devfreq_unregister_notifier);
1646 
/* devres payload recording which notifier to unregister at release time. */
struct devfreq_notifier_devres {
	struct devfreq *devfreq;	/* device the notifier was added to */
	struct notifier_block *nb;	/* the registered notifier block */
	unsigned int list;		/* e.g. DEVFREQ_TRANSITION_NOTIFIER */
};
1652 
1653 static void devm_devfreq_notifier_release(struct device *dev, void *res)
1654 {
1655 	struct devfreq_notifier_devres *this = res;
1656 
1657 	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
1658 }
1659 
1660 /**
1661  * devm_devfreq_register_notifier()
1662 	- Resource-managed devfreq_register_notifier()
1663  * @dev:	The devfreq user device. (parent of devfreq)
1664  * @devfreq:	The devfreq object.
1665  * @nb:		The notifier block to be unregistered.
1666  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1667  */
1668 int devm_devfreq_register_notifier(struct device *dev,
1669 				struct devfreq *devfreq,
1670 				struct notifier_block *nb,
1671 				unsigned int list)
1672 {
1673 	struct devfreq_notifier_devres *ptr;
1674 	int ret;
1675 
1676 	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1677 				GFP_KERNEL);
1678 	if (!ptr)
1679 		return -ENOMEM;
1680 
1681 	ret = devfreq_register_notifier(devfreq, nb, list);
1682 	if (ret) {
1683 		devres_free(ptr);
1684 		return ret;
1685 	}
1686 
1687 	ptr->devfreq = devfreq;
1688 	ptr->nb = nb;
1689 	ptr->list = list;
1690 	devres_add(dev, ptr);
1691 
1692 	return 0;
1693 }
1694 EXPORT_SYMBOL(devm_devfreq_register_notifier);
1695 
1696 /**
1697  * devm_devfreq_unregister_notifier()
1698 	- Resource-managed devfreq_unregister_notifier()
1699  * @dev:	The devfreq user device. (parent of devfreq)
1700  * @devfreq:	The devfreq object.
1701  * @nb:		The notifier block to be unregistered.
1702  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1703  */
1704 void devm_devfreq_unregister_notifier(struct device *dev,
1705 				      struct devfreq *devfreq,
1706 				      struct notifier_block *nb,
1707 				      unsigned int list)
1708 {
1709 	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
1710 			       devm_devfreq_dev_match, devfreq));
1711 }
1712 EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
1713