xref: /openbmc/linux/drivers/devfreq/devfreq.c (revision 55fd7e02)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
4  *	    for Non-CPU Devices.
5  *
6  * Copyright (C) 2011 Samsung Electronics
7  *	MyungJoo Ham <myungjoo.ham@samsung.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/kmod.h>
12 #include <linux/sched.h>
13 #include <linux/debugfs.h>
14 #include <linux/errno.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/export.h>
18 #include <linux/slab.h>
19 #include <linux/stat.h>
20 #include <linux/pm_opp.h>
21 #include <linux/devfreq.h>
22 #include <linux/workqueue.h>
23 #include <linux/platform_device.h>
24 #include <linux/list.h>
25 #include <linux/printk.h>
26 #include <linux/hrtimer.h>
27 #include <linux/of.h>
28 #include <linux/pm_qos.h>
29 #include "governor.h"
30 
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/devfreq.h>
33 
34 #define HZ_PER_KHZ	1000
35 
36 static struct class *devfreq_class;
37 static struct dentry *devfreq_debugfs;
38 
39 /*
40  * devfreq core provides delayed work based load monitoring helper
41  * functions. Governors can use these or can implement their own
42  * monitoring mechanism.
43  */
44 static struct workqueue_struct *devfreq_wq;
45 
46 /* The list of all device-devfreq governors */
47 static LIST_HEAD(devfreq_governor_list);
48 /* The list of all device-devfreq */
49 static LIST_HEAD(devfreq_list);
50 static DEFINE_MUTEX(devfreq_list_lock);
51 
52 /**
53  * find_device_devfreq() - find devfreq struct using device pointer
54  * @dev:	device pointer used to lookup device devfreq.
55  *
56  * Search the list of device devfreqs and return the matched device's
57  * devfreq info. devfreq_list_lock should be held by the caller.
58  */
59 static struct devfreq *find_device_devfreq(struct device *dev)
60 {
61 	struct devfreq *tmp_devfreq;
62 
63 	lockdep_assert_held(&devfreq_list_lock);
64 
65 	if (IS_ERR_OR_NULL(dev)) {
66 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
67 		return ERR_PTR(-EINVAL);
68 	}
69 
70 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
71 		if (tmp_devfreq->dev.parent == dev)
72 			return tmp_devfreq;
73 	}
74 
75 	return ERR_PTR(-ENODEV);
76 }
77 
78 static unsigned long find_available_min_freq(struct devfreq *devfreq)
79 {
80 	struct dev_pm_opp *opp;
81 	unsigned long min_freq = 0;
82 
83 	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
84 	if (IS_ERR(opp))
85 		min_freq = 0;
86 	else
87 		dev_pm_opp_put(opp);
88 
89 	return min_freq;
90 }
91 
92 static unsigned long find_available_max_freq(struct devfreq *devfreq)
93 {
94 	struct dev_pm_opp *opp;
95 	unsigned long max_freq = ULONG_MAX;
96 
97 	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
98 	if (IS_ERR(opp))
99 		max_freq = 0;
100 	else
101 		dev_pm_opp_put(opp);
102 
103 	return max_freq;
104 }
105 
/**
 * get_freq_range() - Get the current freq range
 * @devfreq:	the devfreq instance
 * @min_freq:	the min frequency
 * @max_freq:	the max frequency
 *
 * This takes into consideration all constraints: the driver's freq table,
 * PM QoS requests and the OPP-derived scaling limits. Results are in Hz.
 * Caller must hold devfreq->lock.
 */
static void get_freq_range(struct devfreq *devfreq,
			   unsigned long *min_freq,
			   unsigned long *max_freq)
{
	unsigned long *freq_table = devfreq->profile->freq_table;
	s32 qos_min_freq, qos_max_freq;

	lockdep_assert_held(&devfreq->lock);

	/*
	 * Initialize minimum/maximum frequency from freq table.
	 * The devfreq drivers can initialize this in either ascending or
	 * descending order and devfreq core supports both.
	 */
	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
		*min_freq = freq_table[0];
		*max_freq = freq_table[devfreq->profile->max_state - 1];
	} else {
		*min_freq = freq_table[devfreq->profile->max_state - 1];
		*max_freq = freq_table[0];
	}

	/* Apply constraints from PM QoS */
	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MIN_FREQUENCY);
	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MAX_FREQUENCY);
	/* QoS values are kHz; scale to Hz before clamping. */
	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
	/* The "no limit" default for max must not shrink the range. */
	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
		*max_freq = min(*max_freq,
				(unsigned long)HZ_PER_KHZ * qos_max_freq);

	/* Apply constraints from OPP interface */
	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
	*max_freq = min(*max_freq, devfreq->scaling_max_freq);

	/* If the constraints conflict, the max limit wins. */
	if (*min_freq > *max_freq)
		*min_freq = *max_freq;
}
153 
154 /**
155  * devfreq_get_freq_level() - Lookup freq_table for the frequency
156  * @devfreq:	the devfreq instance
157  * @freq:	the target frequency
158  */
159 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
160 {
161 	int lev;
162 
163 	for (lev = 0; lev < devfreq->profile->max_state; lev++)
164 		if (freq == devfreq->profile->freq_table[lev])
165 			return lev;
166 
167 	return -EINVAL;
168 }
169 
170 static int set_freq_table(struct devfreq *devfreq)
171 {
172 	struct devfreq_dev_profile *profile = devfreq->profile;
173 	struct dev_pm_opp *opp;
174 	unsigned long freq;
175 	int i, count;
176 
177 	/* Initialize the freq_table from OPP table */
178 	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
179 	if (count <= 0)
180 		return -EINVAL;
181 
182 	profile->max_state = count;
183 	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
184 					profile->max_state,
185 					sizeof(*profile->freq_table),
186 					GFP_KERNEL);
187 	if (!profile->freq_table) {
188 		profile->max_state = 0;
189 		return -ENOMEM;
190 	}
191 
192 	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
193 		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
194 		if (IS_ERR(opp)) {
195 			devm_kfree(devfreq->dev.parent, profile->freq_table);
196 			profile->max_state = 0;
197 			return PTR_ERR(opp);
198 		}
199 		dev_pm_opp_put(opp);
200 		profile->freq_table[i] = freq;
201 	}
202 
203 	return 0;
204 }
205 
/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 *
 * Charge the time since the last update to the previous frequency's
 * time_in_state slot and, if the level changes, count the transition.
 * Caller must hold devfreq->lock.
 *
 * Return: 0 on success, or a negative errno if a frequency is not
 * present in the freq table (last_update is refreshed regardless).
 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	u64 cur_time;

	lockdep_assert_held(&devfreq->lock);
	cur_time = get_jiffies_64();

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	/* Time elapsed since the last update was spent at the old level. */
	devfreq->stats.time_in_state[prev_lev] +=
			cur_time - devfreq->stats.last_update;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	/* trans_table is a flattened max_state x max_state matrix. */
	if (lev != prev_lev) {
		devfreq->stats.trans_table[
			(prev_lev * devfreq->profile->max_state) + lev]++;
		devfreq->stats.total_trans++;
	}

out:
	/* Always advance the timestamp, even on the error paths above. */
	devfreq->stats.last_update = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
249 
250 /**
251  * find_devfreq_governor() - find devfreq governor from name
252  * @name:	name of the governor
253  *
254  * Search the list of devfreq governors and return the matched
255  * governor's pointer. devfreq_list_lock should be held by the caller.
256  */
257 static struct devfreq_governor *find_devfreq_governor(const char *name)
258 {
259 	struct devfreq_governor *tmp_governor;
260 
261 	lockdep_assert_held(&devfreq_list_lock);
262 
263 	if (IS_ERR_OR_NULL(name)) {
264 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
265 		return ERR_PTR(-EINVAL);
266 	}
267 
268 	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
269 		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
270 			return tmp_governor;
271 	}
272 
273 	return ERR_PTR(-ENODEV);
274 }
275 
/**
 * try_then_request_governor() - Try to find the governor and request the
 *                               module if is not found.
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and request the module and try again
 * if is not found. This can happen when both drivers (the governor driver
 * and the driver that call devfreq_add_device) are built as modules.
 * devfreq_list_lock should be held by the caller. Returns the matched
 * governor's pointer or an error pointer.
 */
static struct devfreq_governor *try_then_request_governor(const char *name)
{
	struct devfreq_governor *governor;
	int err = 0;

	lockdep_assert_held(&devfreq_list_lock);

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	governor = find_devfreq_governor(name);
	if (IS_ERR(governor)) {
		/*
		 * Drop the list lock: request_module() sleeps, and the
		 * loaded governor registers itself under this same lock.
		 */
		mutex_unlock(&devfreq_list_lock);

		/* The simple_ondemand governor lives in governor_simpleondemand.ko */
		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			     DEVFREQ_NAME_LEN))
			err = request_module("governor_%s", "simpleondemand");
		else
			err = request_module("governor_%s", name);
		/* Restore previous state before return */
		mutex_lock(&devfreq_list_lock);
		if (err)
			/* Positive returns from request_module map to -EINVAL. */
			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);

		/* Retry now that the module had a chance to register. */
		governor = find_devfreq_governor(name);
	}

	return governor;
}
318 
319 static int devfreq_notify_transition(struct devfreq *devfreq,
320 		struct devfreq_freqs *freqs, unsigned int state)
321 {
322 	if (!devfreq)
323 		return -EINVAL;
324 
325 	switch (state) {
326 	case DEVFREQ_PRECHANGE:
327 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
328 				DEVFREQ_PRECHANGE, freqs);
329 		break;
330 
331 	case DEVFREQ_POSTCHANGE:
332 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
333 				DEVFREQ_POSTCHANGE, freqs);
334 		break;
335 	default:
336 		return -EINVAL;
337 	}
338 
339 	return 0;
340 }
341 
/*
 * devfreq_set_target() - Ask the driver to switch to @new_freq and keep
 * the notifier chain, statistics and previous_freq consistent.
 * Caller must hold devfreq->lock (devfreq_update_status() asserts it).
 */
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	/* Prefer the driver's own notion of the current frequency. */
	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	/* The driver may adjust new_freq to a supported value. */
	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		/* Roll back: announce POSTCHANGE with the unchanged freq. */
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	/*
	 * Remember where to return to after a system suspend.
	 * NOTE(review): this records the pre-switch frequency (cur_freq),
	 * not new_freq — verify against devfreq_resume_device() usage.
	 */
	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
379 
380 /* Load monitoring helper functions for governors use */
381 
/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Ask the governor for a target frequency, clamp it to the current
 * min/max range and hand it to devfreq_set_target().
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	lockdep_assert_held(&devfreq->lock);

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;
	get_freq_range(devfreq, &min_freq, &max_freq);

	/*
	 * When clamping, tell the driver which rounding direction keeps
	 * us inside the range: greatest-lower-bound when raised to the
	 * minimum, least-upper-bound when lowered to the maximum.
	 */
	if (freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	return devfreq_set_target(devfreq, freq, flags);

}
EXPORT_SYMBOL(update_devfreq);
419 
/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 * Deferrable-work body: reevaluates the frequency, then re-queues
 * itself with the current polling interval.
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	/* Re-arm while still holding the lock so polling_ms is stable. */
	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);

	trace_devfreq_monitor(devfreq);
}
442 
443 /**
444  * devfreq_monitor_start() - Start load monitoring of devfreq instance
445  * @devfreq:	the devfreq instance.
446  *
447  * Helper function for starting devfreq device load monitoring. By
448  * default delayed work based monitoring is supported. Function
449  * to be called from governor in response to DEVFREQ_GOV_START
450  * event when device is added to devfreq framework.
451  */
452 void devfreq_monitor_start(struct devfreq *devfreq)
453 {
454 	if (devfreq->governor->interrupt_driven)
455 		return;
456 
457 	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
458 	if (devfreq->profile->polling_ms)
459 		queue_delayed_work(devfreq_wq, &devfreq->work,
460 			msecs_to_jiffies(devfreq->profile->polling_ms));
461 }
462 EXPORT_SYMBOL(devfreq_monitor_start);
463 
464 /**
465  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
466  * @devfreq:	the devfreq instance.
467  *
468  * Helper function to stop devfreq device load monitoring. Function
469  * to be called from governor in response to DEVFREQ_GOV_STOP
470  * event when device is removed from devfreq framework.
471  */
472 void devfreq_monitor_stop(struct devfreq *devfreq)
473 {
474 	if (devfreq->governor->interrupt_driven)
475 		return;
476 
477 	cancel_delayed_work_sync(&devfreq->work);
478 }
479 EXPORT_SYMBOL(devfreq_monitor_stop);
480 
/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is same as devfreq_monitor_stop(),
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	/* Already suspended: nothing to account or cancel. */
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	/* Close out time_in_state accounting before polling stops. */
	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);

	if (devfreq->governor->interrupt_driven)
		return;

	/*
	 * Must run unlocked: devfreq_monitor() takes devfreq->lock, so
	 * waiting for it here while holding the lock would deadlock.
	 */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
511 
/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:    the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	/* Nothing to do unless monitoring was previously suspended. */
	if (!devfreq->stop_polling)
		goto out;

	/* Interrupt-driven governors need no work re-queued. */
	if (devfreq->governor->interrupt_driven)
		goto out_update;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

out_update:
	/* Restart statistics from now; suspended time is not charged. */
	devfreq->stats.last_update = get_jiffies_64();
	devfreq->stop_polling = false;

	/* Re-read the actual frequency; it may have changed while suspended. */
	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
548 
/**
 * devfreq_update_interval() - Update device devfreq monitoring interval
 * @devfreq:    the devfreq instance.
 * @delay:      new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
 */
void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* Polling is suspended; the new value takes effect on resume. */
	if (devfreq->stop_polling)
		goto out;

	/* Interrupt-driven governors have no delayed work to adjust. */
	if (devfreq->governor->interrupt_driven)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		/*
		 * Drop the lock first: devfreq_monitor() takes it, so
		 * cancel_delayed_work_sync() under the lock could deadlock.
		 */
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		/* Re-check: polling may have been suspended while unlocked. */
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_update_interval);
598 
/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     has been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb. Refreshes the OPP-derived
 * scaling limits and reevaluates the frequency. Always returns NOTIFY_OK;
 * failures are only logged.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	/* Pre-seed with -EINVAL so early bail-outs below are logged too. */
	int err = -EINVAL;

	mutex_lock(&devfreq->lock);

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq)
		goto out;

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		/* No usable OPP: fall back to "unlimited" max. */
		devfreq->scaling_max_freq = ULONG_MAX;
		goto out;
	}

	err = update_devfreq(devfreq);

out:
	mutex_unlock(&devfreq->lock);
	if (err)
		dev_err(devfreq->dev.parent,
			"failed to update frequency from OPP notifier (%d)\n",
			err);

	return NOTIFY_OK;
}
637 
638 /**
639  * qos_notifier_call() - Common handler for QoS constraints.
640  * @devfreq:    the devfreq instance.
641  */
642 static int qos_notifier_call(struct devfreq *devfreq)
643 {
644 	int err;
645 
646 	mutex_lock(&devfreq->lock);
647 	err = update_devfreq(devfreq);
648 	mutex_unlock(&devfreq->lock);
649 	if (err)
650 		dev_err(devfreq->dev.parent,
651 			"failed to update frequency from PM QoS (%d)\n",
652 			err);
653 
654 	return NOTIFY_OK;
655 }
656 
657 /**
658  * qos_min_notifier_call() - Callback for QoS min_freq changes.
659  * @nb:		Should be devfreq->nb_min
660  */
661 static int qos_min_notifier_call(struct notifier_block *nb,
662 					 unsigned long val, void *ptr)
663 {
664 	return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
665 }
666 
667 /**
668  * qos_max_notifier_call() - Callback for QoS max_freq changes.
669  * @nb:		Should be devfreq->nb_max
670  */
671 static int qos_max_notifier_call(struct notifier_block *nb,
672 					 unsigned long val, void *ptr)
673 {
674 	return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
675 }
676 
/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * Remove devfreq from the list and release its resources. Runs when the
 * last reference to the embedded struct device is dropped (after
 * device_unregister() in devfreq_remove_device()).
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	int err;

	mutex_lock(&devfreq_list_lock);
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	/* -ENOENT just means the notifier was never added; don't warn. */
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
					 DEV_PM_QOS_MAX_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove max_freq notifier: %d\n", err);
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
					 DEV_PM_QOS_MIN_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove min_freq notifier: %d\n", err);

	/* User-space QoS requests may not exist if add_device bailed early. */
	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
		if (err < 0)
			dev_warn(dev->parent,
				"Failed to remove max_freq request: %d\n", err);
	}
	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
		if (err < 0)
			dev_warn(dev->parent,
				"Failed to remove min_freq request: %d\n", err);
	}

	/* Let the driver undo whatever its profile init did. */
	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
722 
/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * Return: the new devfreq instance, or an ERR_PTR() on failure.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Only one devfreq instance is allowed per parent device. */
	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: devfreq device already exists!\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	INIT_LIST_HEAD(&devfreq->node);
	devfreq->profile = profile;
	strscpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	/* Build a freq_table from the OPP table if the driver gave none. */
	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0)
			goto err_dev;
		mutex_lock(&devfreq->lock);
	}

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
	atomic_set(&devfreq->suspend_count, 0);

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		/* After register failure, put_device() frees via release(). */
		put_device(&devfreq->dev);
		goto err_out;
	}

	/* trans_table is a flattened max_state x max_state matrix. */
	devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
			array3_size(sizeof(unsigned int),
				    devfreq->profile->max_state,
				    devfreq->profile->max_state),
			GFP_KERNEL);
	if (!devfreq->stats.trans_table) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
			devfreq->profile->max_state,
			sizeof(*devfreq->stats.time_in_state),
			GFP_KERNEL);
	if (!devfreq->stats.time_in_state) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.total_trans = 0;
	devfreq->stats.last_update = get_jiffies_64();

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	/* User-space min/max requests start out as "no constraint". */
	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
				     DEV_PM_QOS_MIN_FREQUENCY, 0);
	if (err < 0)
		goto err_devfreq;
	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (err < 0)
		goto err_devfreq;

	/* React to any QoS min/max change on the parent device. */
	devfreq->nb_min.notifier_call = qos_min_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
				      DEV_PM_QOS_MIN_FREQUENCY);
	if (err)
		goto err_devfreq;

	devfreq->nb_max.notifier_call = qos_max_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
				      DEV_PM_QOS_MAX_FREQUENCY);
	if (err)
		goto err_devfreq;

	mutex_lock(&devfreq_list_lock);

	/* May drop and retake devfreq_list_lock to load the module. */
	governor = try_then_request_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	list_add(&devfreq->node, &devfreq_list);

	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	mutex_unlock(&devfreq_list_lock);
err_devfreq:
	/* Unregister drops the last ref; release() frees devfreq. */
	devfreq_remove_device(devfreq);
	/* NULL out so the err_dev kfree() below becomes a no-op. */
	devfreq = NULL;
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
893 
894 /**
895  * devfreq_remove_device() - Remove devfreq feature from a device.
896  * @devfreq:	the devfreq instance to be removed
897  *
898  * The opposite of devfreq_add_device().
899  */
900 int devfreq_remove_device(struct devfreq *devfreq)
901 {
902 	if (!devfreq)
903 		return -EINVAL;
904 
905 	if (devfreq->governor)
906 		devfreq->governor->event_handler(devfreq,
907 						 DEVFREQ_GOV_STOP, NULL);
908 	device_unregister(&devfreq->dev);
909 
910 	return 0;
911 }
912 EXPORT_SYMBOL(devfreq_remove_device);
913 
/* devres match callback: does this resource wrap the given devfreq? */
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **this = res;

	/* A NULL resource slot would indicate devres corruption. */
	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
923 
/* devres release callback: tear down the managed devfreq instance. */
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	struct devfreq *devfreq = *(struct devfreq **)res;

	devfreq_remove_device(devfreq);
}
928 
929 /**
930  * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
931  * @dev:	the device to add devfreq feature.
932  * @profile:	device-specific profile to run devfreq.
933  * @governor_name:	name of the policy to choose frequency.
934  * @data:	private data for the governor. The devfreq framework does not
935  *		touch this value.
936  *
937  * This function manages automatically the memory of devfreq device using device
938  * resource management and simplify the free operation for memory of devfreq
939  * device.
940  */
941 struct devfreq *devm_devfreq_add_device(struct device *dev,
942 					struct devfreq_dev_profile *profile,
943 					const char *governor_name,
944 					void *data)
945 {
946 	struct devfreq **ptr, *devfreq;
947 
948 	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
949 	if (!ptr)
950 		return ERR_PTR(-ENOMEM);
951 
952 	devfreq = devfreq_add_device(dev, profile, governor_name, data);
953 	if (IS_ERR(devfreq)) {
954 		devres_free(ptr);
955 		return devfreq;
956 	}
957 
958 	*ptr = devfreq;
959 	devres_add(dev, ptr);
960 
961 	return devfreq;
962 }
963 EXPORT_SYMBOL(devm_devfreq_add_device);
964 
965 #ifdef CONFIG_OF
966 /*
967  * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
968  * @dev - instance to the given device
969  * @index - index into list of devfreq
970  *
971  * return the instance of devfreq device
972  */
973 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
974 {
975 	struct device_node *node;
976 	struct devfreq *devfreq;
977 
978 	if (!dev)
979 		return ERR_PTR(-EINVAL);
980 
981 	if (!dev->of_node)
982 		return ERR_PTR(-EINVAL);
983 
984 	node = of_parse_phandle(dev->of_node, "devfreq", index);
985 	if (!node)
986 		return ERR_PTR(-ENODEV);
987 
988 	mutex_lock(&devfreq_list_lock);
989 	list_for_each_entry(devfreq, &devfreq_list, node) {
990 		if (devfreq->dev.parent
991 			&& devfreq->dev.parent->of_node == node) {
992 			mutex_unlock(&devfreq_list_lock);
993 			of_node_put(node);
994 			return devfreq;
995 		}
996 	}
997 	mutex_unlock(&devfreq_list_lock);
998 	of_node_put(node);
999 
1000 	return ERR_PTR(-EPROBE_DEFER);
1001 }
1002 #else
1003 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
1004 {
1005 	return ERR_PTR(-ENODEV);
1006 }
1007 #endif /* CONFIG_OF */
1008 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
1009 
/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 *
 * Releases the matching devres entry, which in turn calls
 * devfreq_remove_device(). Warns if no such managed entry exists.
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);
1021 
/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq: the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 *
 * Suspends nest via suspend_count: only the first call notifies the
 * governor and pins the suspend frequency.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq)
		return -EINVAL;

	/* Already suspended by an outer caller; just bump the count. */
	if (atomic_inc_return(&devfreq->suspend_count) > 1)
		return 0;

	if (devfreq->governor) {
		ret = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_SUSPEND, NULL);
		if (ret)
			return ret;
	}

	/* Pin the OPP-declared suspend frequency, if the device has one. */
	if (devfreq->suspend_freq) {
		mutex_lock(&devfreq->lock);
		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
		mutex_unlock(&devfreq->lock);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(devfreq_suspend_device);
1057 EXPORT_SYMBOL(devfreq_suspend_device);
1058 
1059 /**
1060  * devfreq_resume_device() - Resume devfreq of a device.
1061  * @devfreq: the devfreq instance to be resumed
1062  *
1063  * This function is intended to be called by the pm callbacks
1064  * (e.g., runtime_resume, resume) of the device driver that
1065  * holds the devfreq.
1066  */
1067 int devfreq_resume_device(struct devfreq *devfreq)
1068 {
1069 	int ret;
1070 
1071 	if (!devfreq)
1072 		return -EINVAL;
1073 
1074 	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
1075 		return 0;
1076 
1077 	if (devfreq->resume_freq) {
1078 		mutex_lock(&devfreq->lock);
1079 		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
1080 		mutex_unlock(&devfreq->lock);
1081 		if (ret)
1082 			return ret;
1083 	}
1084 
1085 	if (devfreq->governor) {
1086 		ret = devfreq->governor->event_handler(devfreq,
1087 					DEVFREQ_GOV_RESUME, NULL);
1088 		if (ret)
1089 			return ret;
1090 	}
1091 
1092 	return 0;
1093 }
1094 EXPORT_SYMBOL(devfreq_resume_device);
1095 
1096 /**
1097  * devfreq_suspend() - Suspend devfreq governors and devices
1098  *
1099  * Called during system wide Suspend/Hibernate cycles for suspending governors
1100  * and devices preserving the state for resume. On some platforms the devfreq
1101  * device must have precise state (frequency) after resume in order to provide
1102  * fully operating setup.
1103  */
1104 void devfreq_suspend(void)
1105 {
1106 	struct devfreq *devfreq;
1107 	int ret;
1108 
1109 	mutex_lock(&devfreq_list_lock);
1110 	list_for_each_entry(devfreq, &devfreq_list, node) {
1111 		ret = devfreq_suspend_device(devfreq);
1112 		if (ret)
1113 			dev_err(&devfreq->dev,
1114 				"failed to suspend devfreq device\n");
1115 	}
1116 	mutex_unlock(&devfreq_list_lock);
1117 }
1118 
1119 /**
1120  * devfreq_resume() - Resume devfreq governors and devices
1121  *
1122  * Called during system wide Suspend/Hibernate cycle for resuming governors and
1123  * devices that are suspended with devfreq_suspend().
1124  */
1125 void devfreq_resume(void)
1126 {
1127 	struct devfreq *devfreq;
1128 	int ret;
1129 
1130 	mutex_lock(&devfreq_list_lock);
1131 	list_for_each_entry(devfreq, &devfreq_list, node) {
1132 		ret = devfreq_resume_device(devfreq);
1133 		if (ret)
1134 			dev_warn(&devfreq->dev,
1135 				 "failed to resume devfreq device\n");
1136 	}
1137 	mutex_unlock(&devfreq_list_lock);
1138 }
1139 
1140 /**
1141  * devfreq_add_governor() - Add devfreq governor
1142  * @governor:	the devfreq governor to be added
1143  */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	/* Reject duplicate registration (lookup is by governor name). */
	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	/*
	 * Attach the new governor to every existing devfreq device that
	 * requested it by name (governor_name) before it was available.
	 */
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through to start the new governor */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);
1204 
1205 /**
1206  * devfreq_remove_governor() - Remove devfreq feature from a device.
1207  * @governor:	the devfreq governor to be removed
1208  */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	/*
	 * Stop the governor on every devfreq device that uses it; those
	 * devices stay governor-less (devfreq->governor == NULL) until a
	 * governor with the same name is registered again.
	 */
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
1259 
1260 static ssize_t name_show(struct device *dev,
1261 			struct device_attribute *attr, char *buf)
1262 {
1263 	struct devfreq *devfreq = to_devfreq(dev);
1264 	return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
1265 }
1266 static DEVICE_ATTR_RO(name);
1267 
1268 static ssize_t governor_show(struct device *dev,
1269 			     struct device_attribute *attr, char *buf)
1270 {
1271 	if (!to_devfreq(dev)->governor)
1272 		return -EINVAL;
1273 
1274 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1275 }
1276 
1277 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1278 			      const char *buf, size_t count)
1279 {
1280 	struct devfreq *df = to_devfreq(dev);
1281 	int ret;
1282 	char str_governor[DEVFREQ_NAME_LEN + 1];
1283 	const struct devfreq_governor *governor, *prev_governor;
1284 
1285 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1286 	if (ret != 1)
1287 		return -EINVAL;
1288 
1289 	mutex_lock(&devfreq_list_lock);
1290 	governor = try_then_request_governor(str_governor);
1291 	if (IS_ERR(governor)) {
1292 		ret = PTR_ERR(governor);
1293 		goto out;
1294 	}
1295 	if (df->governor == governor) {
1296 		ret = 0;
1297 		goto out;
1298 	} else if ((df->governor && df->governor->immutable) ||
1299 					governor->immutable) {
1300 		ret = -EINVAL;
1301 		goto out;
1302 	}
1303 
1304 	if (df->governor) {
1305 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1306 		if (ret) {
1307 			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1308 				 __func__, df->governor->name, ret);
1309 			goto out;
1310 		}
1311 	}
1312 	prev_governor = df->governor;
1313 	df->governor = governor;
1314 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1315 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1316 	if (ret) {
1317 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
1318 			 __func__, df->governor->name, ret);
1319 		df->governor = prev_governor;
1320 		strncpy(df->governor_name, prev_governor->name,
1321 			DEVFREQ_NAME_LEN);
1322 		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1323 		if (ret) {
1324 			dev_err(dev,
1325 				"%s: reverting to Governor %s failed (%d)\n",
1326 				__func__, df->governor_name, ret);
1327 			df->governor = NULL;
1328 		}
1329 	}
1330 out:
1331 	mutex_unlock(&devfreq_list_lock);
1332 
1333 	if (!ret)
1334 		ret = count;
1335 	return ret;
1336 }
1337 static DEVICE_ATTR_RW(governor);
1338 
/* sysfs "available_governors": space-separated list of usable governors. */
static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * A devfreq device bound to an immutable governor (e.g., passive)
	 * shows only its own governor, since it cannot be switched.
	 */
	if (df->governor && df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * Otherwise show every registered governor except immutable ones
	 * such as the passive governor.
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);
1381 
1382 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1383 			     char *buf)
1384 {
1385 	unsigned long freq;
1386 	struct devfreq *devfreq = to_devfreq(dev);
1387 
1388 	if (devfreq->profile->get_cur_freq &&
1389 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1390 		return sprintf(buf, "%lu\n", freq);
1391 
1392 	return sprintf(buf, "%lu\n", devfreq->previous_freq);
1393 }
1394 static DEVICE_ATTR_RO(cur_freq);
1395 
/* sysfs "target_freq": the frequency most recently set by devfreq. */
static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);
1402 
/* sysfs "polling_interval" (read): the load-monitoring period in ms. */
static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}
1408 
1409 static ssize_t polling_interval_store(struct device *dev,
1410 				      struct device_attribute *attr,
1411 				      const char *buf, size_t count)
1412 {
1413 	struct devfreq *df = to_devfreq(dev);
1414 	unsigned int value;
1415 	int ret;
1416 
1417 	if (!df->governor)
1418 		return -EINVAL;
1419 
1420 	ret = sscanf(buf, "%u", &value);
1421 	if (ret != 1)
1422 		return -EINVAL;
1423 
1424 	df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
1425 	ret = count;
1426 
1427 	return ret;
1428 }
1429 static DEVICE_ATTR_RW(polling_interval);
1430 
1431 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1432 			      const char *buf, size_t count)
1433 {
1434 	struct devfreq *df = to_devfreq(dev);
1435 	unsigned long value;
1436 	int ret;
1437 
1438 	/*
1439 	 * Protect against theoretical sysfs writes between
1440 	 * device_add and dev_pm_qos_add_request
1441 	 */
1442 	if (!dev_pm_qos_request_active(&df->user_min_freq_req))
1443 		return -EAGAIN;
1444 
1445 	ret = sscanf(buf, "%lu", &value);
1446 	if (ret != 1)
1447 		return -EINVAL;
1448 
1449 	/* Round down to kHz for PM QoS */
1450 	ret = dev_pm_qos_update_request(&df->user_min_freq_req,
1451 					value / HZ_PER_KHZ);
1452 	if (ret < 0)
1453 		return ret;
1454 
1455 	return count;
1456 }
1457 
1458 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1459 			     char *buf)
1460 {
1461 	struct devfreq *df = to_devfreq(dev);
1462 	unsigned long min_freq, max_freq;
1463 
1464 	mutex_lock(&df->lock);
1465 	get_freq_range(df, &min_freq, &max_freq);
1466 	mutex_unlock(&df->lock);
1467 
1468 	return sprintf(buf, "%lu\n", min_freq);
1469 }
1470 static DEVICE_ATTR_RW(min_freq);
1471 
1472 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1473 			      const char *buf, size_t count)
1474 {
1475 	struct devfreq *df = to_devfreq(dev);
1476 	unsigned long value;
1477 	int ret;
1478 
1479 	/*
1480 	 * Protect against theoretical sysfs writes between
1481 	 * device_add and dev_pm_qos_add_request
1482 	 */
1483 	if (!dev_pm_qos_request_active(&df->user_max_freq_req))
1484 		return -EINVAL;
1485 
1486 	ret = sscanf(buf, "%lu", &value);
1487 	if (ret != 1)
1488 		return -EINVAL;
1489 
1490 	/*
1491 	 * PM QoS frequencies are in kHz so we need to convert. Convert by
1492 	 * rounding upwards so that the acceptable interval never shrinks.
1493 	 *
1494 	 * For example if the user writes "666666666" to sysfs this value will
1495 	 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
1496 	 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
1497 	 *
1498 	 * A value of zero means "no limit".
1499 	 */
1500 	if (value)
1501 		value = DIV_ROUND_UP(value, HZ_PER_KHZ);
1502 	else
1503 		value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
1504 
1505 	ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
1506 	if (ret < 0)
1507 		return ret;
1508 
1509 	return count;
1510 }
1511 
1512 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1513 			     char *buf)
1514 {
1515 	struct devfreq *df = to_devfreq(dev);
1516 	unsigned long min_freq, max_freq;
1517 
1518 	mutex_lock(&df->lock);
1519 	get_freq_range(df, &min_freq, &max_freq);
1520 	mutex_unlock(&df->lock);
1521 
1522 	return sprintf(buf, "%lu\n", max_freq);
1523 }
1524 static DEVICE_ATTR_RW(max_freq);
1525 
1526 static ssize_t available_frequencies_show(struct device *d,
1527 					  struct device_attribute *attr,
1528 					  char *buf)
1529 {
1530 	struct devfreq *df = to_devfreq(d);
1531 	ssize_t count = 0;
1532 	int i;
1533 
1534 	mutex_lock(&df->lock);
1535 
1536 	for (i = 0; i < df->profile->max_state; i++)
1537 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1538 				"%lu ", df->profile->freq_table[i]);
1539 
1540 	mutex_unlock(&df->lock);
1541 	/* Truncate the trailing space */
1542 	if (count)
1543 		count--;
1544 
1545 	count += sprintf(&buf[count], "\n");
1546 
1547 	return count;
1548 }
1549 static DEVICE_ATTR_RO(available_frequencies);
1550 
/*
 * sysfs "trans_stat" (read): print the frequency-transition matrix, the
 * time spent at each frequency, and the total transition count.
 */
static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	/* Bring the statistics up to date before printing them. */
	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq)) {
		mutex_unlock(&devfreq->lock);
		return 0;
	}
	mutex_unlock(&devfreq->lock);

	/* Header row: one column per frequency, plus a time column. */
	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	/* One row per source frequency; '*' marks the current frequency. */
	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		/* trans_table is a flattened max_state x max_state matrix. */
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->stats.trans_table[(i * max_state) + j]);

		len += sprintf(buf + len, "%10llu\n", (u64)
			jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->stats.total_trans);
	return len;
}
1599 
1600 static ssize_t trans_stat_store(struct device *dev,
1601 				struct device_attribute *attr,
1602 				const char *buf, size_t count)
1603 {
1604 	struct devfreq *df = to_devfreq(dev);
1605 	int err, value;
1606 
1607 	if (df->profile->max_state == 0)
1608 		return count;
1609 
1610 	err = kstrtoint(buf, 10, &value);
1611 	if (err || value != 0)
1612 		return -EINVAL;
1613 
1614 	mutex_lock(&df->lock);
1615 	memset(df->stats.time_in_state, 0, (df->profile->max_state *
1616 					sizeof(*df->stats.time_in_state)));
1617 	memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
1618 					df->profile->max_state,
1619 					df->profile->max_state));
1620 	df->stats.total_trans = 0;
1621 	df->stats.last_update = get_jiffies_64();
1622 	mutex_unlock(&df->lock);
1623 
1624 	return count;
1625 }
1626 static DEVICE_ATTR_RW(trans_stat);
1627 
/* Default sysfs attributes installed on every devfreq device. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);
1642 
1643 /**
1644  * devfreq_summary_show() - Show the summary of the devfreq devices
1645  * @s:		seq_file instance to show the summary of devfreq devices
1646  * @data:	not used
1647  *
1648  * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
1649  * It helps that user can know the detailed information of the devfreq devices.
1650  *
1651  * Return 0 always because it shows the information without any data change.
1652  */
1653 static int devfreq_summary_show(struct seq_file *s, void *data)
1654 {
1655 	struct devfreq *devfreq;
1656 	struct devfreq *p_devfreq = NULL;
1657 	unsigned long cur_freq, min_freq, max_freq;
1658 	unsigned int polling_ms;
1659 
1660 	seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
1661 			"dev_name",
1662 			"dev",
1663 			"parent_dev",
1664 			"governor",
1665 			"polling_ms",
1666 			"cur_freq_Hz",
1667 			"min_freq_Hz",
1668 			"max_freq_Hz");
1669 	seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
1670 			"------------------------------",
1671 			"----------",
1672 			"----------",
1673 			"---------------",
1674 			"----------",
1675 			"------------",
1676 			"------------",
1677 			"------------");
1678 
1679 	mutex_lock(&devfreq_list_lock);
1680 
1681 	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
1682 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
1683 		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
1684 							DEVFREQ_NAME_LEN)) {
1685 			struct devfreq_passive_data *data = devfreq->data;
1686 
1687 			if (data)
1688 				p_devfreq = data->parent;
1689 		} else {
1690 			p_devfreq = NULL;
1691 		}
1692 #endif
1693 
1694 		mutex_lock(&devfreq->lock);
1695 		cur_freq = devfreq->previous_freq,
1696 		get_freq_range(devfreq, &min_freq, &max_freq);
1697 		polling_ms = devfreq->profile->polling_ms,
1698 		mutex_unlock(&devfreq->lock);
1699 
1700 		seq_printf(s,
1701 			"%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
1702 			dev_name(devfreq->dev.parent),
1703 			dev_name(&devfreq->dev),
1704 			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
1705 			devfreq->governor_name,
1706 			polling_ms,
1707 			cur_freq,
1708 			min_freq,
1709 			max_freq);
1710 	}
1711 
1712 	mutex_unlock(&devfreq_list_lock);
1713 
1714 	return 0;
1715 }
1716 DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
1717 
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	/* Freezable so monitoring work is parked across system suspend. */
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	/* debugfs failures are non-fatal; the helpers cope with errors. */
	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
	debugfs_create_file("devfreq_summary", 0444,
				devfreq_debugfs, NULL,
				&devfreq_summary_fops);

	return 0;
}
subsys_initcall(devfreq_init);
1742 
1743 /*
1744  * The following are helper functions for devfreq user device drivers with
1745  * OPP framework.
1746  */
1747 
1748 /**
1749  * devfreq_recommended_opp() - Helper function to get proper OPP for the
1750  *			     freq value given to target callback.
1751  * @dev:	The devfreq user device. (parent of devfreq)
1752  * @freq:	The frequency given to target function
1753  * @flags:	Flags handed from devfreq framework.
1754  *
1755  * The callers are required to call dev_pm_opp_put() for the returned OPP after
1756  * use.
1757  */
1758 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1759 					   unsigned long *freq,
1760 					   u32 flags)
1761 {
1762 	struct dev_pm_opp *opp;
1763 
1764 	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1765 		/* The freq is an upper bound. opp should be lower */
1766 		opp = dev_pm_opp_find_freq_floor(dev, freq);
1767 
1768 		/* If not available, use the closest opp */
1769 		if (opp == ERR_PTR(-ERANGE))
1770 			opp = dev_pm_opp_find_freq_ceil(dev, freq);
1771 	} else {
1772 		/* The freq is an lower bound. opp should be higher */
1773 		opp = dev_pm_opp_find_freq_ceil(dev, freq);
1774 
1775 		/* If not available, use the closest opp */
1776 		if (opp == ERR_PTR(-ERANGE))
1777 			opp = dev_pm_opp_find_freq_floor(dev, freq);
1778 	}
1779 
1780 	return opp;
1781 }
1782 EXPORT_SYMBOL(devfreq_recommended_opp);
1783 
1784 /**
1785  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1786  *				     for any changes in the OPP availability
1787  *				     changes
1788  * @dev:	The devfreq user device. (parent of devfreq)
1789  * @devfreq:	The devfreq object.
1790  */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	/* Hook devfreq->nb into the device's OPP notifier chain. */
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);
1796 
1797 /**
1798  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1799  *				       notified for any changes in the OPP
1800  *				       availability changes anymore.
1801  * @dev:	The devfreq user device. (parent of devfreq)
1802  * @devfreq:	The devfreq object.
1803  *
1804  * At exit() callback of devfreq_dev_profile, this must be included if
1805  * devfreq_recommended_opp is used.
1806  */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	/* Remove devfreq->nb from the device's OPP notifier chain. */
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
1812 
/* devres destructor: @res holds the struct devfreq * stored at register time. */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}
1817 
1818 /**
1819  * devm_devfreq_register_opp_notifier() - Resource-managed
1820  *					  devfreq_register_opp_notifier()
1821  * @dev:	The devfreq user device. (parent of devfreq)
1822  * @devfreq:	The devfreq object.
1823  */
1824 int devm_devfreq_register_opp_notifier(struct device *dev,
1825 				       struct devfreq *devfreq)
1826 {
1827 	struct devfreq **ptr;
1828 	int ret;
1829 
1830 	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1831 	if (!ptr)
1832 		return -ENOMEM;
1833 
1834 	ret = devfreq_register_opp_notifier(dev, devfreq);
1835 	if (ret) {
1836 		devres_free(ptr);
1837 		return ret;
1838 	}
1839 
1840 	*ptr = devfreq;
1841 	devres_add(dev, ptr);
1842 
1843 	return 0;
1844 }
1845 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1846 
1847 /**
1848  * devm_devfreq_unregister_opp_notifier() - Resource-managed
1849  *					    devfreq_unregister_opp_notifier()
1850  * @dev:	The devfreq user device. (parent of devfreq)
1851  * @devfreq:	The devfreq object.
1852  */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					 struct devfreq *devfreq)
{
	/*
	 * Release the matching devres entry; its destructor unregisters
	 * the OPP notifier.  WARN if no matching registration exists.
	 */
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1860 
1861 /**
1862  * devfreq_register_notifier() - Register a driver with devfreq
1863  * @devfreq:	The devfreq object.
1864  * @nb:		The notifier block to register.
1865  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1866  */
1867 int devfreq_register_notifier(struct devfreq *devfreq,
1868 			      struct notifier_block *nb,
1869 			      unsigned int list)
1870 {
1871 	int ret = 0;
1872 
1873 	if (!devfreq)
1874 		return -EINVAL;
1875 
1876 	switch (list) {
1877 	case DEVFREQ_TRANSITION_NOTIFIER:
1878 		ret = srcu_notifier_chain_register(
1879 				&devfreq->transition_notifier_list, nb);
1880 		break;
1881 	default:
1882 		ret = -EINVAL;
1883 	}
1884 
1885 	return ret;
1886 }
1887 EXPORT_SYMBOL(devfreq_register_notifier);
1888 
/**
1890  * devfreq_unregister_notifier() - Unregister a driver with devfreq
1891  * @devfreq:	The devfreq object.
1892  * @nb:		The notifier block to be unregistered.
1893  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1894  */
1895 int devfreq_unregister_notifier(struct devfreq *devfreq,
1896 				struct notifier_block *nb,
1897 				unsigned int list)
1898 {
1899 	int ret = 0;
1900 
1901 	if (!devfreq)
1902 		return -EINVAL;
1903 
1904 	switch (list) {
1905 	case DEVFREQ_TRANSITION_NOTIFIER:
1906 		ret = srcu_notifier_chain_unregister(
1907 				&devfreq->transition_notifier_list, nb);
1908 		break;
1909 	default:
1910 		ret = -EINVAL;
1911 	}
1912 
1913 	return ret;
1914 }
1915 EXPORT_SYMBOL(devfreq_unregister_notifier);
1916 
/* devres payload recording one devfreq notifier registration. */
struct devfreq_notifier_devres {
	struct devfreq *devfreq;	/* instance the notifier is attached to */
	struct notifier_block *nb;	/* the registered notifier block */
	unsigned int list;		/* chain id, e.g. DEVFREQ_TRANSITION_NOTIFIER */
};
1922 
/* devres destructor: undo the notifier registration recorded in @res. */
static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}
1929 
1930 /**
1931  * devm_devfreq_register_notifier()
1932  *	- Resource-managed devfreq_register_notifier()
1933  * @dev:	The devfreq user device. (parent of devfreq)
1934  * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be registered.
1936  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1937  */
1938 int devm_devfreq_register_notifier(struct device *dev,
1939 				struct devfreq *devfreq,
1940 				struct notifier_block *nb,
1941 				unsigned int list)
1942 {
1943 	struct devfreq_notifier_devres *ptr;
1944 	int ret;
1945 
1946 	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1947 				GFP_KERNEL);
1948 	if (!ptr)
1949 		return -ENOMEM;
1950 
1951 	ret = devfreq_register_notifier(devfreq, nb, list);
1952 	if (ret) {
1953 		devres_free(ptr);
1954 		return ret;
1955 	}
1956 
1957 	ptr->devfreq = devfreq;
1958 	ptr->nb = nb;
1959 	ptr->list = list;
1960 	devres_add(dev, ptr);
1961 
1962 	return 0;
1963 }
1964 EXPORT_SYMBOL(devm_devfreq_register_notifier);
1965 
1966 /**
1967  * devm_devfreq_unregister_notifier()
1968  *	- Resource-managed devfreq_unregister_notifier()
1969  * @dev:	The devfreq user device. (parent of devfreq)
1970  * @devfreq:	The devfreq object.
1971  * @nb:		The notifier block to be unregistered.
1972  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1973  */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	/*
	 * Matching is done by @devfreq alone (devm_devfreq_dev_match);
	 * @nb and @list do not participate in selecting the devres entry.
	 */
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
1983