xref: /openbmc/linux/drivers/devfreq/devfreq.c (revision 86edee97)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
4  *	    for Non-CPU Devices.
5  *
6  * Copyright (C) 2011 Samsung Electronics
7  *	MyungJoo Ham <myungjoo.ham@samsung.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/kmod.h>
12 #include <linux/sched.h>
13 #include <linux/debugfs.h>
14 #include <linux/errno.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/export.h>
18 #include <linux/slab.h>
19 #include <linux/stat.h>
20 #include <linux/pm_opp.h>
21 #include <linux/devfreq.h>
22 #include <linux/workqueue.h>
23 #include <linux/platform_device.h>
24 #include <linux/list.h>
25 #include <linux/printk.h>
26 #include <linux/hrtimer.h>
27 #include <linux/of.h>
28 #include <linux/pm_qos.h>
29 #include "governor.h"
30 
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/devfreq.h>
33 
34 #define HZ_PER_KHZ	1000
35 
36 static struct class *devfreq_class;
37 static struct dentry *devfreq_debugfs;
38 
39 /*
40  * devfreq core provides delayed work based load monitoring helper
41  * functions. Governors can use these or can implement their own
42  * monitoring mechanism.
43  */
44 static struct workqueue_struct *devfreq_wq;
45 
46 /* The list of all device-devfreq governors */
47 static LIST_HEAD(devfreq_governor_list);
48 /* The list of all device-devfreq */
49 static LIST_HEAD(devfreq_list);
50 static DEFINE_MUTEX(devfreq_list_lock);
51 
52 /**
53  * find_device_devfreq() - find devfreq struct using device pointer
54  * @dev:	device pointer used to lookup device devfreq.
55  *
56  * Search the list of device devfreqs and return the matched device's
57  * devfreq info. devfreq_list_lock should be held by the caller.
58  */
59 static struct devfreq *find_device_devfreq(struct device *dev)
60 {
61 	struct devfreq *tmp_devfreq;
62 
63 	if (IS_ERR_OR_NULL(dev)) {
64 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
65 		return ERR_PTR(-EINVAL);
66 	}
67 	WARN(!mutex_is_locked(&devfreq_list_lock),
68 	     "devfreq_list_lock must be locked.");
69 
70 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
71 		if (tmp_devfreq->dev.parent == dev)
72 			return tmp_devfreq;
73 	}
74 
75 	return ERR_PTR(-ENODEV);
76 }
77 
78 static unsigned long find_available_min_freq(struct devfreq *devfreq)
79 {
80 	struct dev_pm_opp *opp;
81 	unsigned long min_freq = 0;
82 
83 	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
84 	if (IS_ERR(opp))
85 		min_freq = 0;
86 	else
87 		dev_pm_opp_put(opp);
88 
89 	return min_freq;
90 }
91 
92 static unsigned long find_available_max_freq(struct devfreq *devfreq)
93 {
94 	struct dev_pm_opp *opp;
95 	unsigned long max_freq = ULONG_MAX;
96 
97 	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
98 	if (IS_ERR(opp))
99 		max_freq = 0;
100 	else
101 		dev_pm_opp_put(opp);
102 
103 	return max_freq;
104 }
105 
106 /**
107  * get_freq_range() - Get the current freq range
108  * @devfreq:	the devfreq instance
109  * @min_freq:	the min frequency
110  * @max_freq:	the max frequency
111  *
112  * This takes into consideration all constraints.
113  */
static void get_freq_range(struct devfreq *devfreq,
			   unsigned long *min_freq,
			   unsigned long *max_freq)
{
	unsigned long *freq_table = devfreq->profile->freq_table;
	s32 qos_min_freq, qos_max_freq;

	/* Callers (update_devfreq() etc.) must hold devfreq->lock. */
	lockdep_assert_held(&devfreq->lock);

	/*
	 * Initialize minimum/maximum frequency from freq table.
	 * The devfreq drivers can initialize this in either ascending or
	 * descending order and devfreq core supports both.
	 */
	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
		*min_freq = freq_table[0];
		*max_freq = freq_table[devfreq->profile->max_state - 1];
	} else {
		*min_freq = freq_table[devfreq->profile->max_state - 1];
		*max_freq = freq_table[0];
	}

	/* Apply constraints from PM QoS; QoS values are kHz, ours are Hz. */
	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MIN_FREQUENCY);
	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MAX_FREQUENCY);
	/* A default min QoS of 0 is harmless in the max() below. */
	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
		*max_freq = min(*max_freq,
				(unsigned long)HZ_PER_KHZ * qos_max_freq);

	/* Apply constraints from OPP interface */
	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
	*max_freq = min(*max_freq, devfreq->scaling_max_freq);

	/* On conflicting constraints the maximum wins. */
	if (*min_freq > *max_freq)
		*min_freq = *max_freq;
}
153 
154 /**
155  * devfreq_get_freq_level() - Lookup freq_table for the frequency
156  * @devfreq:	the devfreq instance
157  * @freq:	the target frequency
158  */
159 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
160 {
161 	int lev;
162 
163 	for (lev = 0; lev < devfreq->profile->max_state; lev++)
164 		if (freq == devfreq->profile->freq_table[lev])
165 			return lev;
166 
167 	return -EINVAL;
168 }
169 
static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return -ENOMEM;
	}

	/*
	 * Walk the OPPs in ascending order: find_freq_ceil() rounds @freq
	 * up to the next available OPP, and the freq++ on each iteration
	 * steps past the OPP found on the previous one.
	 */
	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			/* Roll back so callers see a consistent empty table. */
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}
205 
206 /**
207  * devfreq_update_status() - Update statistics of devfreq behavior
208  * @devfreq:	the devfreq instance
209  * @freq:	the update target frequency
210  */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	u64 cur_time;

	lockdep_assert_held(&devfreq->lock);
	cur_time = get_jiffies_64();

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	/* Credit the time since the last update to the outgoing level. */
	devfreq->stats.time_in_state[prev_lev] +=
			cur_time - devfreq->stats.last_update;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	/* Record the (from, to) transition when the level actually changed. */
	if (lev != prev_lev) {
		devfreq->stats.trans_table[
			(prev_lev * devfreq->profile->max_state) + lev]++;
		devfreq->stats.total_trans++;
	}

out:
	/* Always advance last_update, even on early exit, so time is never
	 * double-counted by the next call. */
	devfreq->stats.last_update = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
249 
250 /**
251  * find_devfreq_governor() - find devfreq governor from name
252  * @name:	name of the governor
253  *
254  * Search the list of devfreq governors and return the matched
255  * governor's pointer. devfreq_list_lock should be held by the caller.
256  */
257 static struct devfreq_governor *find_devfreq_governor(const char *name)
258 {
259 	struct devfreq_governor *tmp_governor;
260 
261 	if (IS_ERR_OR_NULL(name)) {
262 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
263 		return ERR_PTR(-EINVAL);
264 	}
265 	WARN(!mutex_is_locked(&devfreq_list_lock),
266 	     "devfreq_list_lock must be locked.");
267 
268 	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
269 		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
270 			return tmp_governor;
271 	}
272 
273 	return ERR_PTR(-ENODEV);
274 }
275 
276 /**
277  * try_then_request_governor() - Try to find the governor and request the
278  *                               module if is not found.
279  * @name:	name of the governor
280  *
281  * Search the list of devfreq governors and request the module and try again
282  * if is not found. This can happen when both drivers (the governor driver
283  * and the driver that call devfreq_add_device) are built as modules.
284  * devfreq_list_lock should be held by the caller. Returns the matched
285  * governor's pointer or an error pointer.
286  */
static struct devfreq_governor *try_then_request_governor(const char *name)
{
	struct devfreq_governor *governor;
	int err = 0;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	governor = find_devfreq_governor(name);
	if (IS_ERR(governor)) {
		/*
		 * Not registered yet: drop the list lock so the governor
		 * module can load and register itself, then retry.
		 */
		mutex_unlock(&devfreq_list_lock);

		/* The simple_ondemand governor's module has a different name. */
		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			     DEVFREQ_NAME_LEN))
			err = request_module("governor_%s", "simpleondemand");
		else
			err = request_module("governor_%s", name);
		/* Restore previous state before return */
		mutex_lock(&devfreq_list_lock);
		if (err)
			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);

		governor = find_devfreq_governor(name);
	}

	return governor;
}
318 
319 static int devfreq_notify_transition(struct devfreq *devfreq,
320 		struct devfreq_freqs *freqs, unsigned int state)
321 {
322 	if (!devfreq)
323 		return -EINVAL;
324 
325 	switch (state) {
326 	case DEVFREQ_PRECHANGE:
327 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
328 				DEVFREQ_PRECHANGE, freqs);
329 		break;
330 
331 	case DEVFREQ_POSTCHANGE:
332 		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
333 				DEVFREQ_POSTCHANGE, freqs);
334 		break;
335 	default:
336 		return -EINVAL;
337 	}
338 
339 	return 0;
340 }
341 
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	/* Prefer the driver-reported current frequency over our cache. */
	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		/* Tell listeners the frequency did not change after all. */
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	/* The driver may have rewritten new_freq to an achievable rate. */
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	/* Remember where to return to when resuming from suspend_freq. */
	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
379 
380 /* Load monitoring helper functions for governors use */
381 
382 /**
383  * update_devfreq() - Reevaluate the device and configure frequency.
384  * @devfreq:	the devfreq instance.
385  *
386  * Note: Lock devfreq->lock before calling update_devfreq
387  *	 This function is exported for governors.
388  */
389 int update_devfreq(struct devfreq *devfreq)
390 {
391 	unsigned long freq, min_freq, max_freq;
392 	int err = 0;
393 	u32 flags = 0;
394 
395 	if (!mutex_is_locked(&devfreq->lock)) {
396 		WARN(true, "devfreq->lock must be locked by the caller.\n");
397 		return -EINVAL;
398 	}
399 
400 	if (!devfreq->governor)
401 		return -EINVAL;
402 
403 	/* Reevaluate the proper frequency */
404 	err = devfreq->governor->get_target_freq(devfreq, &freq);
405 	if (err)
406 		return err;
407 	get_freq_range(devfreq, &min_freq, &max_freq);
408 
409 	if (freq < min_freq) {
410 		freq = min_freq;
411 		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
412 	}
413 	if (freq > max_freq) {
414 		freq = max_freq;
415 		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
416 	}
417 
418 	return devfreq_set_target(devfreq, freq, flags);
419 
420 }
421 EXPORT_SYMBOL(update_devfreq);
422 
423 /**
424  * devfreq_monitor() - Periodically poll devfreq objects.
425  * @work:	the work struct used to run devfreq_monitor periodically.
426  *
427  */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	/* Re-arm ourselves; polling_ms is read while holding the lock. */
	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);

	/* Tracepoint fires outside the lock. */
	trace_devfreq_monitor(devfreq);
}
445 
446 /**
447  * devfreq_monitor_start() - Start load monitoring of devfreq instance
448  * @devfreq:	the devfreq instance.
449  *
450  * Helper function for starting devfreq device load monitoring. By
451  * default delayed work based monitoring is supported. Function
452  * to be called from governor in response to DEVFREQ_GOV_START
453  * event when device is added to devfreq framework.
454  */
455 void devfreq_monitor_start(struct devfreq *devfreq)
456 {
457 	if (devfreq->governor->interrupt_driven)
458 		return;
459 
460 	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
461 	if (devfreq->profile->polling_ms)
462 		queue_delayed_work(devfreq_wq, &devfreq->work,
463 			msecs_to_jiffies(devfreq->profile->polling_ms));
464 }
465 EXPORT_SYMBOL(devfreq_monitor_start);
466 
467 /**
468  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
469  * @devfreq:	the devfreq instance.
470  *
471  * Helper function to stop devfreq device load monitoring. Function
472  * to be called from governor in response to DEVFREQ_GOV_STOP
473  * event when device is removed from devfreq framework.
474  */
475 void devfreq_monitor_stop(struct devfreq *devfreq)
476 {
477 	if (devfreq->governor->interrupt_driven)
478 		return;
479 
480 	cancel_delayed_work_sync(&devfreq->work);
481 }
482 EXPORT_SYMBOL(devfreq_monitor_stop);
483 
484 /**
485  * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
486  * @devfreq:	the devfreq instance.
487  *
488  * Helper function to suspend devfreq device load monitoring. Function
489  * to be called from governor in response to DEVFREQ_GOV_SUSPEND
490  * event or when polling interval is set to zero.
491  *
492  * Note: Though this function is same as devfreq_monitor_stop(),
493  * intentionally kept separate to provide hooks for collecting
494  * transition statistics.
495  */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	/* Already suspended: nothing to do. */
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	/* Close out the statistics for the current frequency. */
	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);

	/* Interrupt-driven governors have no delayed work to cancel. */
	if (devfreq->governor->interrupt_driven)
		return;

	/* Must be called without devfreq->lock: devfreq_monitor() takes it. */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
514 
515 /**
516  * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
517  * @devfreq:    the devfreq instance.
518  *
519  * Helper function to resume devfreq device load monitoring. Function
520  * to be called from governor in response to DEVFREQ_GOV_RESUME
521  * event or when polling interval is set to non-zero.
522  */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	/* Not suspended: nothing to do. */
	if (!devfreq->stop_polling)
		goto out;

	/* Interrupt-driven governors need only the state update below. */
	if (devfreq->governor->interrupt_driven)
		goto out_update;

	/* Restart polling unless it is disabled or already queued. */
	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

out_update:
	/* Restart statistics from now, so suspended time is not counted. */
	devfreq->stats.last_update = get_jiffies_64();
	devfreq->stop_polling = false;

	/* Re-sync previous_freq with hardware if the driver can report it. */
	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
551 
552 /**
553  * devfreq_interval_update() - Update device devfreq monitoring interval
554  * @devfreq:    the devfreq instance.
555  * @delay:      new polling interval to be set.
556  *
557  * Helper function to set new load monitoring polling interval. Function
558  * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
559  */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* Monitoring suspended: the new value takes effect on resume. */
	if (devfreq->stop_polling)
		goto out;

	/* Interrupt-driven governors do not use the polling machinery. */
	if (devfreq->governor->interrupt_driven)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		/* Drop the lock first: devfreq_monitor() also takes it. */
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		/* State may have changed while the lock was dropped. */
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
601 
602 /**
603  * devfreq_notifier_call() - Notify that the device frequency requirements
604  *			     has been changed out of devfreq framework.
605  * @nb:		the notifier_block (supposed to be devfreq->nb)
606  * @type:	not used
607  * @devp:	not used
608  *
609  * Called by a notifier that uses devfreq->nb.
610  */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int err = -EINVAL;

	mutex_lock(&devfreq->lock);

	/* Re-read the OPP-provided frequency limits. */
	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq)
		goto out;

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		/* No usable max: fall back to unlimited, still report error. */
		devfreq->scaling_max_freq = ULONG_MAX;
		goto out;
	}

	/* Re-evaluate the target frequency against the new limits. */
	err = update_devfreq(devfreq);

out:
	mutex_unlock(&devfreq->lock);
	if (err)
		dev_err(devfreq->dev.parent,
			"failed to update frequency from OPP notifier (%d)\n",
			err);

	return NOTIFY_OK;
}
640 
641 /**
642  * qos_notifier_call() - Common handler for QoS constraints.
643  * @devfreq:    the devfreq instance.
644  */
645 static int qos_notifier_call(struct devfreq *devfreq)
646 {
647 	int err;
648 
649 	mutex_lock(&devfreq->lock);
650 	err = update_devfreq(devfreq);
651 	mutex_unlock(&devfreq->lock);
652 	if (err)
653 		dev_err(devfreq->dev.parent,
654 			"failed to update frequency from PM QoS (%d)\n",
655 			err);
656 
657 	return NOTIFY_OK;
658 }
659 
660 /**
661  * qos_min_notifier_call() - Callback for QoS min_freq changes.
662  * @nb:		Should be devfreq->nb_min
663  */
664 static int qos_min_notifier_call(struct notifier_block *nb,
665 					 unsigned long val, void *ptr)
666 {
667 	return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
668 }
669 
670 /**
671  * qos_max_notifier_call() - Callback for QoS max_freq changes.
672  * @nb:		Should be devfreq->nb_max
673  */
674 static int qos_max_notifier_call(struct notifier_block *nb,
675 					 unsigned long val, void *ptr)
676 {
677 	return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
678 }
679 
680 /**
681  * devfreq_dev_release() - Callback for struct device to release the device.
682  * @dev:	the devfreq device
683  *
684  * Remove devfreq from the list and release its resources.
685  */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	int err;

	mutex_lock(&devfreq_list_lock);
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	/* -ENOENT just means the notifier was never added; not a problem. */
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
					 DEV_PM_QOS_MAX_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove max_freq notifier: %d\n", err);
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
					 DEV_PM_QOS_MIN_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove min_freq notifier: %d\n", err);

	/* The user QoS requests exist only if devfreq_add_device() got far
	 * enough to add them, hence the _active checks. */
	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
		if (err)
			dev_warn(dev->parent,
				"Failed to remove max_freq request: %d\n", err);
	}
	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
		if (err)
			dev_warn(dev->parent,
				"Failed to remove min_freq request: %d\n", err);
	}

	/* Give the driver a chance to release its own resources. */
	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
725 
726 /**
727  * devfreq_add_device() - Add devfreq feature to the device
728  * @dev:	the device to add devfreq feature.
729  * @profile:	device-specific profile to run devfreq.
730  * @governor_name:	name of the policy to choose frequency.
731  * @data:	private data for the governor. The devfreq framework does not
732  *		touch this value.
733  */
734 struct devfreq *devfreq_add_device(struct device *dev,
735 				   struct devfreq_dev_profile *profile,
736 				   const char *governor_name,
737 				   void *data)
738 {
739 	struct devfreq *devfreq;
740 	struct devfreq_governor *governor;
741 	static atomic_t devfreq_no = ATOMIC_INIT(-1);
742 	int err = 0;
743 
744 	if (!dev || !profile || !governor_name) {
745 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
746 		return ERR_PTR(-EINVAL);
747 	}
748 
749 	mutex_lock(&devfreq_list_lock);
750 	devfreq = find_device_devfreq(dev);
751 	mutex_unlock(&devfreq_list_lock);
752 	if (!IS_ERR(devfreq)) {
753 		dev_err(dev, "%s: devfreq device already exists!\n",
754 			__func__);
755 		err = -EINVAL;
756 		goto err_out;
757 	}
758 
759 	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
760 	if (!devfreq) {
761 		err = -ENOMEM;
762 		goto err_out;
763 	}
764 
765 	mutex_init(&devfreq->lock);
766 	mutex_lock(&devfreq->lock);
767 	devfreq->dev.parent = dev;
768 	devfreq->dev.class = devfreq_class;
769 	devfreq->dev.release = devfreq_dev_release;
770 	INIT_LIST_HEAD(&devfreq->node);
771 	devfreq->profile = profile;
772 	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
773 	devfreq->previous_freq = profile->initial_freq;
774 	devfreq->last_status.current_frequency = profile->initial_freq;
775 	devfreq->data = data;
776 	devfreq->nb.notifier_call = devfreq_notifier_call;
777 
778 	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
779 		mutex_unlock(&devfreq->lock);
780 		err = set_freq_table(devfreq);
781 		if (err < 0)
782 			goto err_dev;
783 		mutex_lock(&devfreq->lock);
784 	}
785 
786 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
787 	if (!devfreq->scaling_min_freq) {
788 		mutex_unlock(&devfreq->lock);
789 		err = -EINVAL;
790 		goto err_dev;
791 	}
792 
793 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
794 	if (!devfreq->scaling_max_freq) {
795 		mutex_unlock(&devfreq->lock);
796 		err = -EINVAL;
797 		goto err_dev;
798 	}
799 
800 	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
801 	atomic_set(&devfreq->suspend_count, 0);
802 
803 	dev_set_name(&devfreq->dev, "devfreq%d",
804 				atomic_inc_return(&devfreq_no));
805 	err = device_register(&devfreq->dev);
806 	if (err) {
807 		mutex_unlock(&devfreq->lock);
808 		put_device(&devfreq->dev);
809 		goto err_out;
810 	}
811 
812 	devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
813 			array3_size(sizeof(unsigned int),
814 				    devfreq->profile->max_state,
815 				    devfreq->profile->max_state),
816 			GFP_KERNEL);
817 	if (!devfreq->stats.trans_table) {
818 		mutex_unlock(&devfreq->lock);
819 		err = -ENOMEM;
820 		goto err_devfreq;
821 	}
822 
823 	devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
824 			devfreq->profile->max_state,
825 			sizeof(*devfreq->stats.time_in_state),
826 			GFP_KERNEL);
827 	if (!devfreq->stats.time_in_state) {
828 		mutex_unlock(&devfreq->lock);
829 		err = -ENOMEM;
830 		goto err_devfreq;
831 	}
832 
833 	devfreq->stats.total_trans = 0;
834 	devfreq->stats.last_update = get_jiffies_64();
835 
836 	srcu_init_notifier_head(&devfreq->transition_notifier_list);
837 
838 	mutex_unlock(&devfreq->lock);
839 
840 	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
841 				     DEV_PM_QOS_MIN_FREQUENCY, 0);
842 	if (err < 0)
843 		goto err_devfreq;
844 	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
845 				     DEV_PM_QOS_MAX_FREQUENCY,
846 				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
847 	if (err < 0)
848 		goto err_devfreq;
849 
850 	devfreq->nb_min.notifier_call = qos_min_notifier_call;
851 	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
852 				      DEV_PM_QOS_MIN_FREQUENCY);
853 	if (err)
854 		goto err_devfreq;
855 
856 	devfreq->nb_max.notifier_call = qos_max_notifier_call;
857 	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
858 				      DEV_PM_QOS_MAX_FREQUENCY);
859 	if (err)
860 		goto err_devfreq;
861 
862 	mutex_lock(&devfreq_list_lock);
863 
864 	governor = try_then_request_governor(devfreq->governor_name);
865 	if (IS_ERR(governor)) {
866 		dev_err(dev, "%s: Unable to find governor for the device\n",
867 			__func__);
868 		err = PTR_ERR(governor);
869 		goto err_init;
870 	}
871 
872 	devfreq->governor = governor;
873 	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
874 						NULL);
875 	if (err) {
876 		dev_err(dev, "%s: Unable to start governor for the device\n",
877 			__func__);
878 		goto err_init;
879 	}
880 
881 	list_add(&devfreq->node, &devfreq_list);
882 
883 	mutex_unlock(&devfreq_list_lock);
884 
885 	return devfreq;
886 
887 err_init:
888 	mutex_unlock(&devfreq_list_lock);
889 err_devfreq:
890 	devfreq_remove_device(devfreq);
891 	devfreq = NULL;
892 err_dev:
893 	kfree(devfreq);
894 err_out:
895 	return ERR_PTR(err);
896 }
897 EXPORT_SYMBOL(devfreq_add_device);
898 
899 /**
900  * devfreq_remove_device() - Remove devfreq feature from a device.
901  * @devfreq:	the devfreq instance to be removed
902  *
903  * The opposite of devfreq_add_device().
904  */
905 int devfreq_remove_device(struct devfreq *devfreq)
906 {
907 	if (!devfreq)
908 		return -EINVAL;
909 
910 	if (devfreq->governor)
911 		devfreq->governor->event_handler(devfreq,
912 						 DEVFREQ_GOV_STOP, NULL);
913 	device_unregister(&devfreq->dev);
914 
915 	return 0;
916 }
917 EXPORT_SYMBOL(devfreq_remove_device);
918 
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **this = res;

	/* A devres entry without a devfreq pointer would be a bug. */
	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
928 
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	struct devfreq *devfreq = *(struct devfreq **)res;

	devfreq_remove_device(devfreq);
}
933 
934 /**
935  * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
936  * @dev:	the device to add devfreq feature.
937  * @profile:	device-specific profile to run devfreq.
938  * @governor_name:	name of the policy to choose frequency.
939  * @data:	private data for the governor. The devfreq framework does not
940  *		touch this value.
941  *
942  * This function manages automatically the memory of devfreq device using device
943  * resource management and simplify the free operation for memory of devfreq
944  * device.
945  */
946 struct devfreq *devm_devfreq_add_device(struct device *dev,
947 					struct devfreq_dev_profile *profile,
948 					const char *governor_name,
949 					void *data)
950 {
951 	struct devfreq **ptr, *devfreq;
952 
953 	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
954 	if (!ptr)
955 		return ERR_PTR(-ENOMEM);
956 
957 	devfreq = devfreq_add_device(dev, profile, governor_name, data);
958 	if (IS_ERR(devfreq)) {
959 		devres_free(ptr);
960 		return devfreq;
961 	}
962 
963 	*ptr = devfreq;
964 	devres_add(dev, ptr);
965 
966 	return devfreq;
967 }
968 EXPORT_SYMBOL(devm_devfreq_add_device);
969 
970 #ifdef CONFIG_OF
971 /*
972  * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
973  * @dev - instance to the given device
974  * @index - index into list of devfreq
975  *
976  * return the instance of devfreq device
977  */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	/* Match the phandle target against each devfreq's parent device. */
	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	/* Not registered yet: the provider may simply not have probed. */
	return ERR_PTR(-EPROBE_DEFER);
}
1007 #else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	/* Without CONFIG_OF there is no "devfreq" phandle to resolve. */
	return ERR_PTR(-ENODEV);
}
1012 #endif /* CONFIG_OF */
1013 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
1014 
1015 /**
1016  * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
1017  * @dev:	the device from which to remove devfreq feature.
1018  * @devfreq:	the devfreq instance to be removed
1019  */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	/* Releasing the matched devres entry runs devm_devfreq_dev_release(),
	 * which calls devfreq_remove_device(). WARN if no entry matched. */
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);
1026 
1027 /**
1028  * devfreq_suspend_device() - Suspend devfreq of a device.
1029  * @devfreq: the devfreq instance to be suspended
1030  *
1031  * This function is intended to be called by the pm callbacks
1032  * (e.g., runtime_suspend, suspend) of the device driver that
1033  * holds the devfreq.
1034  */
1035 int devfreq_suspend_device(struct devfreq *devfreq)
1036 {
1037 	int ret;
1038 
1039 	if (!devfreq)
1040 		return -EINVAL;
1041 
1042 	if (atomic_inc_return(&devfreq->suspend_count) > 1)
1043 		return 0;
1044 
1045 	if (devfreq->governor) {
1046 		ret = devfreq->governor->event_handler(devfreq,
1047 					DEVFREQ_GOV_SUSPEND, NULL);
1048 		if (ret)
1049 			return ret;
1050 	}
1051 
1052 	if (devfreq->suspend_freq) {
1053 		mutex_lock(&devfreq->lock);
1054 		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
1055 		mutex_unlock(&devfreq->lock);
1056 		if (ret)
1057 			return ret;
1058 	}
1059 
1060 	return 0;
1061 }
1062 EXPORT_SYMBOL(devfreq_suspend_device);
1063 
1064 /**
1065  * devfreq_resume_device() - Resume devfreq of a device.
1066  * @devfreq: the devfreq instance to be resumed
1067  *
1068  * This function is intended to be called by the pm callbacks
1069  * (e.g., runtime_resume, resume) of the device driver that
1070  * holds the devfreq.
1071  */
1072 int devfreq_resume_device(struct devfreq *devfreq)
1073 {
1074 	int ret;
1075 
1076 	if (!devfreq)
1077 		return -EINVAL;
1078 
1079 	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
1080 		return 0;
1081 
1082 	if (devfreq->resume_freq) {
1083 		mutex_lock(&devfreq->lock);
1084 		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
1085 		mutex_unlock(&devfreq->lock);
1086 		if (ret)
1087 			return ret;
1088 	}
1089 
1090 	if (devfreq->governor) {
1091 		ret = devfreq->governor->event_handler(devfreq,
1092 					DEVFREQ_GOV_RESUME, NULL);
1093 		if (ret)
1094 			return ret;
1095 	}
1096 
1097 	return 0;
1098 }
1099 EXPORT_SYMBOL(devfreq_resume_device);
1100 
1101 /**
1102  * devfreq_suspend() - Suspend devfreq governors and devices
1103  *
1104  * Called during system wide Suspend/Hibernate cycles for suspending governors
1105  * and devices preserving the state for resume. On some platforms the devfreq
1106  * device must have precise state (frequency) after resume in order to provide
1107  * fully operating setup.
1108  */
1109 void devfreq_suspend(void)
1110 {
1111 	struct devfreq *devfreq;
1112 	int ret;
1113 
1114 	mutex_lock(&devfreq_list_lock);
1115 	list_for_each_entry(devfreq, &devfreq_list, node) {
1116 		ret = devfreq_suspend_device(devfreq);
1117 		if (ret)
1118 			dev_err(&devfreq->dev,
1119 				"failed to suspend devfreq device\n");
1120 	}
1121 	mutex_unlock(&devfreq_list_lock);
1122 }
1123 
1124 /**
1125  * devfreq_resume() - Resume devfreq governors and devices
1126  *
1127  * Called during system wide Suspend/Hibernate cycle for resuming governors and
1128  * devices that are suspended with devfreq_suspend().
1129  */
1130 void devfreq_resume(void)
1131 {
1132 	struct devfreq *devfreq;
1133 	int ret;
1134 
1135 	mutex_lock(&devfreq_list_lock);
1136 	list_for_each_entry(devfreq, &devfreq_list, node) {
1137 		ret = devfreq_resume_device(devfreq);
1138 		if (ret)
1139 			dev_warn(&devfreq->dev,
1140 				 "failed to resume devfreq device\n");
1141 	}
1142 	mutex_unlock(&devfreq_list_lock);
1143 }
1144 
1145 /**
1146  * devfreq_add_governor() - Add devfreq governor
1147  * @governor:	the devfreq governor to be added
1148  */
1149 int devfreq_add_governor(struct devfreq_governor *governor)
1150 {
1151 	struct devfreq_governor *g;
1152 	struct devfreq *devfreq;
1153 	int err = 0;
1154 
1155 	if (!governor) {
1156 		pr_err("%s: Invalid parameters.\n", __func__);
1157 		return -EINVAL;
1158 	}
1159 
1160 	mutex_lock(&devfreq_list_lock);
1161 	g = find_devfreq_governor(governor->name);
1162 	if (!IS_ERR(g)) {
1163 		pr_err("%s: governor %s already registered\n", __func__,
1164 		       g->name);
1165 		err = -EINVAL;
1166 		goto err_out;
1167 	}
1168 
1169 	list_add(&governor->node, &devfreq_governor_list);
1170 
1171 	list_for_each_entry(devfreq, &devfreq_list, node) {
1172 		int ret = 0;
1173 		struct device *dev = devfreq->dev.parent;
1174 
1175 		if (!strncmp(devfreq->governor_name, governor->name,
1176 			     DEVFREQ_NAME_LEN)) {
1177 			/* The following should never occur */
1178 			if (devfreq->governor) {
1179 				dev_warn(dev,
1180 					 "%s: Governor %s already present\n",
1181 					 __func__, devfreq->governor->name);
1182 				ret = devfreq->governor->event_handler(devfreq,
1183 							DEVFREQ_GOV_STOP, NULL);
1184 				if (ret) {
1185 					dev_warn(dev,
1186 						 "%s: Governor %s stop = %d\n",
1187 						 __func__,
1188 						 devfreq->governor->name, ret);
1189 				}
1190 				/* Fall through */
1191 			}
1192 			devfreq->governor = governor;
1193 			ret = devfreq->governor->event_handler(devfreq,
1194 						DEVFREQ_GOV_START, NULL);
1195 			if (ret) {
1196 				dev_warn(dev, "%s: Governor %s start=%d\n",
1197 					 __func__, devfreq->governor->name,
1198 					 ret);
1199 			}
1200 		}
1201 	}
1202 
1203 err_out:
1204 	mutex_unlock(&devfreq_list_lock);
1205 
1206 	return err;
1207 }
1208 EXPORT_SYMBOL(devfreq_add_governor);
1209 
1210 /**
1211  * devfreq_remove_governor() - Remove devfreq feature from a device.
1212  * @governor:	the devfreq governor to be removed
1213  */
1214 int devfreq_remove_governor(struct devfreq_governor *governor)
1215 {
1216 	struct devfreq_governor *g;
1217 	struct devfreq *devfreq;
1218 	int err = 0;
1219 
1220 	if (!governor) {
1221 		pr_err("%s: Invalid parameters.\n", __func__);
1222 		return -EINVAL;
1223 	}
1224 
1225 	mutex_lock(&devfreq_list_lock);
1226 	g = find_devfreq_governor(governor->name);
1227 	if (IS_ERR(g)) {
1228 		pr_err("%s: governor %s not registered\n", __func__,
1229 		       governor->name);
1230 		err = PTR_ERR(g);
1231 		goto err_out;
1232 	}
1233 	list_for_each_entry(devfreq, &devfreq_list, node) {
1234 		int ret;
1235 		struct device *dev = devfreq->dev.parent;
1236 
1237 		if (!strncmp(devfreq->governor_name, governor->name,
1238 			     DEVFREQ_NAME_LEN)) {
1239 			/* we should have a devfreq governor! */
1240 			if (!devfreq->governor) {
1241 				dev_warn(dev, "%s: Governor %s NOT present\n",
1242 					 __func__, governor->name);
1243 				continue;
1244 				/* Fall through */
1245 			}
1246 			ret = devfreq->governor->event_handler(devfreq,
1247 						DEVFREQ_GOV_STOP, NULL);
1248 			if (ret) {
1249 				dev_warn(dev, "%s: Governor %s stop=%d\n",
1250 					 __func__, devfreq->governor->name,
1251 					 ret);
1252 			}
1253 			devfreq->governor = NULL;
1254 		}
1255 	}
1256 
1257 	list_del(&governor->node);
1258 err_out:
1259 	mutex_unlock(&devfreq_list_lock);
1260 
1261 	return err;
1262 }
1263 EXPORT_SYMBOL(devfreq_remove_governor);
1264 
1265 static ssize_t name_show(struct device *dev,
1266 			struct device_attribute *attr, char *buf)
1267 {
1268 	struct devfreq *devfreq = to_devfreq(dev);
1269 	return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
1270 }
1271 static DEVICE_ATTR_RO(name);
1272 
1273 static ssize_t governor_show(struct device *dev,
1274 			     struct device_attribute *attr, char *buf)
1275 {
1276 	if (!to_devfreq(dev)->governor)
1277 		return -EINVAL;
1278 
1279 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1280 }
1281 
1282 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1283 			      const char *buf, size_t count)
1284 {
1285 	struct devfreq *df = to_devfreq(dev);
1286 	int ret;
1287 	char str_governor[DEVFREQ_NAME_LEN + 1];
1288 	const struct devfreq_governor *governor, *prev_governor;
1289 
1290 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1291 	if (ret != 1)
1292 		return -EINVAL;
1293 
1294 	mutex_lock(&devfreq_list_lock);
1295 	governor = try_then_request_governor(str_governor);
1296 	if (IS_ERR(governor)) {
1297 		ret = PTR_ERR(governor);
1298 		goto out;
1299 	}
1300 	if (df->governor == governor) {
1301 		ret = 0;
1302 		goto out;
1303 	} else if ((df->governor && df->governor->immutable) ||
1304 					governor->immutable) {
1305 		ret = -EINVAL;
1306 		goto out;
1307 	}
1308 
1309 	if (df->governor) {
1310 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1311 		if (ret) {
1312 			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1313 				 __func__, df->governor->name, ret);
1314 			goto out;
1315 		}
1316 	}
1317 	prev_governor = df->governor;
1318 	df->governor = governor;
1319 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1320 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1321 	if (ret) {
1322 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
1323 			 __func__, df->governor->name, ret);
1324 		df->governor = prev_governor;
1325 		strncpy(df->governor_name, prev_governor->name,
1326 			DEVFREQ_NAME_LEN);
1327 		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1328 		if (ret) {
1329 			dev_err(dev,
1330 				"%s: reverting to Governor %s failed (%d)\n",
1331 				__func__, df->governor_name, ret);
1332 			df->governor = NULL;
1333 		}
1334 	}
1335 out:
1336 	mutex_unlock(&devfreq_list_lock);
1337 
1338 	if (!ret)
1339 		ret = count;
1340 	return ret;
1341 }
1342 static DEVICE_ATTR_RW(governor);
1343 
1344 static ssize_t available_governors_show(struct device *d,
1345 					struct device_attribute *attr,
1346 					char *buf)
1347 {
1348 	struct devfreq *df = to_devfreq(d);
1349 	ssize_t count = 0;
1350 
1351 	mutex_lock(&devfreq_list_lock);
1352 
1353 	/*
1354 	 * The devfreq with immutable governor (e.g., passive) shows
1355 	 * only own governor.
1356 	 */
1357 	if (df->governor && df->governor->immutable) {
1358 		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1359 				  "%s ", df->governor_name);
1360 	/*
1361 	 * The devfreq device shows the registered governor except for
1362 	 * immutable governors such as passive governor .
1363 	 */
1364 	} else {
1365 		struct devfreq_governor *governor;
1366 
1367 		list_for_each_entry(governor, &devfreq_governor_list, node) {
1368 			if (governor->immutable)
1369 				continue;
1370 			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1371 					   "%s ", governor->name);
1372 		}
1373 	}
1374 
1375 	mutex_unlock(&devfreq_list_lock);
1376 
1377 	/* Truncate the trailing space */
1378 	if (count)
1379 		count--;
1380 
1381 	count += sprintf(&buf[count], "\n");
1382 
1383 	return count;
1384 }
1385 static DEVICE_ATTR_RO(available_governors);
1386 
1387 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1388 			     char *buf)
1389 {
1390 	unsigned long freq;
1391 	struct devfreq *devfreq = to_devfreq(dev);
1392 
1393 	if (devfreq->profile->get_cur_freq &&
1394 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1395 		return sprintf(buf, "%lu\n", freq);
1396 
1397 	return sprintf(buf, "%lu\n", devfreq->previous_freq);
1398 }
1399 static DEVICE_ATTR_RO(cur_freq);
1400 
1401 static ssize_t target_freq_show(struct device *dev,
1402 				struct device_attribute *attr, char *buf)
1403 {
1404 	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1405 }
1406 static DEVICE_ATTR_RO(target_freq);
1407 
1408 static ssize_t polling_interval_show(struct device *dev,
1409 				     struct device_attribute *attr, char *buf)
1410 {
1411 	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1412 }
1413 
1414 static ssize_t polling_interval_store(struct device *dev,
1415 				      struct device_attribute *attr,
1416 				      const char *buf, size_t count)
1417 {
1418 	struct devfreq *df = to_devfreq(dev);
1419 	unsigned int value;
1420 	int ret;
1421 
1422 	if (!df->governor)
1423 		return -EINVAL;
1424 
1425 	ret = sscanf(buf, "%u", &value);
1426 	if (ret != 1)
1427 		return -EINVAL;
1428 
1429 	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1430 	ret = count;
1431 
1432 	return ret;
1433 }
1434 static DEVICE_ATTR_RW(polling_interval);
1435 
1436 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1437 			      const char *buf, size_t count)
1438 {
1439 	struct devfreq *df = to_devfreq(dev);
1440 	unsigned long value;
1441 	int ret;
1442 
1443 	/*
1444 	 * Protect against theoretical sysfs writes between
1445 	 * device_add and dev_pm_qos_add_request
1446 	 */
1447 	if (!dev_pm_qos_request_active(&df->user_min_freq_req))
1448 		return -EAGAIN;
1449 
1450 	ret = sscanf(buf, "%lu", &value);
1451 	if (ret != 1)
1452 		return -EINVAL;
1453 
1454 	/* Round down to kHz for PM QoS */
1455 	ret = dev_pm_qos_update_request(&df->user_min_freq_req,
1456 					value / HZ_PER_KHZ);
1457 	if (ret < 0)
1458 		return ret;
1459 
1460 	return count;
1461 }
1462 
1463 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1464 			     char *buf)
1465 {
1466 	struct devfreq *df = to_devfreq(dev);
1467 	unsigned long min_freq, max_freq;
1468 
1469 	mutex_lock(&df->lock);
1470 	get_freq_range(df, &min_freq, &max_freq);
1471 	mutex_unlock(&df->lock);
1472 
1473 	return sprintf(buf, "%lu\n", min_freq);
1474 }
1475 static DEVICE_ATTR_RW(min_freq);
1476 
1477 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1478 			      const char *buf, size_t count)
1479 {
1480 	struct devfreq *df = to_devfreq(dev);
1481 	unsigned long value;
1482 	int ret;
1483 
1484 	/*
1485 	 * Protect against theoretical sysfs writes between
1486 	 * device_add and dev_pm_qos_add_request
1487 	 */
1488 	if (!dev_pm_qos_request_active(&df->user_max_freq_req))
1489 		return -EINVAL;
1490 
1491 	ret = sscanf(buf, "%lu", &value);
1492 	if (ret != 1)
1493 		return -EINVAL;
1494 
1495 	/*
1496 	 * PM QoS frequencies are in kHz so we need to convert. Convert by
1497 	 * rounding upwards so that the acceptable interval never shrinks.
1498 	 *
1499 	 * For example if the user writes "666666666" to sysfs this value will
1500 	 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
1501 	 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
1502 	 *
1503 	 * A value of zero means "no limit".
1504 	 */
1505 	if (value)
1506 		value = DIV_ROUND_UP(value, HZ_PER_KHZ);
1507 	else
1508 		value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
1509 
1510 	ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
1511 	if (ret < 0)
1512 		return ret;
1513 
1514 	return count;
1515 }
1516 
1517 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1518 			     char *buf)
1519 {
1520 	struct devfreq *df = to_devfreq(dev);
1521 	unsigned long min_freq, max_freq;
1522 
1523 	mutex_lock(&df->lock);
1524 	get_freq_range(df, &min_freq, &max_freq);
1525 	mutex_unlock(&df->lock);
1526 
1527 	return sprintf(buf, "%lu\n", max_freq);
1528 }
1529 static DEVICE_ATTR_RW(max_freq);
1530 
1531 static ssize_t available_frequencies_show(struct device *d,
1532 					  struct device_attribute *attr,
1533 					  char *buf)
1534 {
1535 	struct devfreq *df = to_devfreq(d);
1536 	ssize_t count = 0;
1537 	int i;
1538 
1539 	mutex_lock(&df->lock);
1540 
1541 	for (i = 0; i < df->profile->max_state; i++)
1542 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1543 				"%lu ", df->profile->freq_table[i]);
1544 
1545 	mutex_unlock(&df->lock);
1546 	/* Truncate the trailing space */
1547 	if (count)
1548 		count--;
1549 
1550 	count += sprintf(&buf[count], "\n");
1551 
1552 	return count;
1553 }
1554 static DEVICE_ATTR_RO(available_frequencies);
1555 
1556 static ssize_t trans_stat_show(struct device *dev,
1557 			       struct device_attribute *attr, char *buf)
1558 {
1559 	struct devfreq *devfreq = to_devfreq(dev);
1560 	ssize_t len;
1561 	int i, j;
1562 	unsigned int max_state = devfreq->profile->max_state;
1563 
1564 	if (max_state == 0)
1565 		return sprintf(buf, "Not Supported.\n");
1566 
1567 	mutex_lock(&devfreq->lock);
1568 	if (!devfreq->stop_polling &&
1569 			devfreq_update_status(devfreq, devfreq->previous_freq)) {
1570 		mutex_unlock(&devfreq->lock);
1571 		return 0;
1572 	}
1573 	mutex_unlock(&devfreq->lock);
1574 
1575 	len = sprintf(buf, "     From  :   To\n");
1576 	len += sprintf(buf + len, "           :");
1577 	for (i = 0; i < max_state; i++)
1578 		len += sprintf(buf + len, "%10lu",
1579 				devfreq->profile->freq_table[i]);
1580 
1581 	len += sprintf(buf + len, "   time(ms)\n");
1582 
1583 	for (i = 0; i < max_state; i++) {
1584 		if (devfreq->profile->freq_table[i]
1585 					== devfreq->previous_freq) {
1586 			len += sprintf(buf + len, "*");
1587 		} else {
1588 			len += sprintf(buf + len, " ");
1589 		}
1590 		len += sprintf(buf + len, "%10lu:",
1591 				devfreq->profile->freq_table[i]);
1592 		for (j = 0; j < max_state; j++)
1593 			len += sprintf(buf + len, "%10u",
1594 				devfreq->stats.trans_table[(i * max_state) + j]);
1595 
1596 		len += sprintf(buf + len, "%10llu\n", (u64)
1597 			jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
1598 	}
1599 
1600 	len += sprintf(buf + len, "Total transition : %u\n",
1601 					devfreq->stats.total_trans);
1602 	return len;
1603 }
1604 
1605 static ssize_t trans_stat_store(struct device *dev,
1606 				struct device_attribute *attr,
1607 				const char *buf, size_t count)
1608 {
1609 	struct devfreq *df = to_devfreq(dev);
1610 	int err, value;
1611 
1612 	if (df->profile->max_state == 0)
1613 		return count;
1614 
1615 	err = kstrtoint(buf, 10, &value);
1616 	if (err || value != 0)
1617 		return -EINVAL;
1618 
1619 	mutex_lock(&df->lock);
1620 	memset(df->stats.time_in_state, 0, (df->profile->max_state *
1621 					sizeof(*df->stats.time_in_state)));
1622 	memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
1623 					df->profile->max_state,
1624 					df->profile->max_state));
1625 	df->stats.total_trans = 0;
1626 	df->stats.last_update = get_jiffies_64();
1627 	mutex_unlock(&df->lock);
1628 
1629 	return count;
1630 }
1631 static DEVICE_ATTR_RW(trans_stat);
1632 
/* Default sysfs attributes created for every devfreq device. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);
1647 
1648 /**
1649  * devfreq_summary_show() - Show the summary of the devfreq devices
1650  * @s:		seq_file instance to show the summary of devfreq devices
1651  * @data:	not used
1652  *
1653  * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
1654  * It helps that user can know the detailed information of the devfreq devices.
1655  *
1656  * Return 0 always because it shows the information without any data change.
1657  */
1658 static int devfreq_summary_show(struct seq_file *s, void *data)
1659 {
1660 	struct devfreq *devfreq;
1661 	struct devfreq *p_devfreq = NULL;
1662 	unsigned long cur_freq, min_freq, max_freq;
1663 	unsigned int polling_ms;
1664 
1665 	seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
1666 			"dev_name",
1667 			"dev",
1668 			"parent_dev",
1669 			"governor",
1670 			"polling_ms",
1671 			"cur_freq_Hz",
1672 			"min_freq_Hz",
1673 			"max_freq_Hz");
1674 	seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
1675 			"------------------------------",
1676 			"----------",
1677 			"----------",
1678 			"---------------",
1679 			"----------",
1680 			"------------",
1681 			"------------",
1682 			"------------");
1683 
1684 	mutex_lock(&devfreq_list_lock);
1685 
1686 	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
1687 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
1688 		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
1689 							DEVFREQ_NAME_LEN)) {
1690 			struct devfreq_passive_data *data = devfreq->data;
1691 
1692 			if (data)
1693 				p_devfreq = data->parent;
1694 		} else {
1695 			p_devfreq = NULL;
1696 		}
1697 #endif
1698 
1699 		mutex_lock(&devfreq->lock);
1700 		cur_freq = devfreq->previous_freq,
1701 		get_freq_range(devfreq, &min_freq, &max_freq);
1702 		polling_ms = devfreq->profile->polling_ms,
1703 		mutex_unlock(&devfreq->lock);
1704 
1705 		seq_printf(s,
1706 			"%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
1707 			dev_name(devfreq->dev.parent),
1708 			dev_name(&devfreq->dev),
1709 			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
1710 			devfreq->governor_name,
1711 			polling_ms,
1712 			cur_freq,
1713 			min_freq,
1714 			max_freq);
1715 	}
1716 
1717 	mutex_unlock(&devfreq_list_lock);
1718 
1719 	return 0;
1720 }
1721 DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
1722 
/* Create the devfreq class, monitoring workqueue and debugfs entries. */
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	/* Freezable so that load monitoring pauses across system suspend. */
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	/* debugfs is optional; errors here are deliberately ignored. */
	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
	debugfs_create_file("devfreq_summary", 0444,
				devfreq_debugfs, NULL,
				&devfreq_summary_fops);

	return 0;
}
subsys_initcall(devfreq_init);
1747 
1748 /*
1749  * The following are helper functions for devfreq user device drivers with
1750  * OPP framework.
1751  */
1752 
1753 /**
1754  * devfreq_recommended_opp() - Helper function to get proper OPP for the
1755  *			     freq value given to target callback.
1756  * @dev:	The devfreq user device. (parent of devfreq)
1757  * @freq:	The frequency given to target function
1758  * @flags:	Flags handed from devfreq framework.
1759  *
1760  * The callers are required to call dev_pm_opp_put() for the returned OPP after
1761  * use.
1762  */
1763 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1764 					   unsigned long *freq,
1765 					   u32 flags)
1766 {
1767 	struct dev_pm_opp *opp;
1768 
1769 	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1770 		/* The freq is an upper bound. opp should be lower */
1771 		opp = dev_pm_opp_find_freq_floor(dev, freq);
1772 
1773 		/* If not available, use the closest opp */
1774 		if (opp == ERR_PTR(-ERANGE))
1775 			opp = dev_pm_opp_find_freq_ceil(dev, freq);
1776 	} else {
1777 		/* The freq is an lower bound. opp should be higher */
1778 		opp = dev_pm_opp_find_freq_ceil(dev, freq);
1779 
1780 		/* If not available, use the closest opp */
1781 		if (opp == ERR_PTR(-ERANGE))
1782 			opp = dev_pm_opp_find_freq_floor(dev, freq);
1783 	}
1784 
1785 	return opp;
1786 }
1787 EXPORT_SYMBOL(devfreq_recommended_opp);
1788 
1789 /**
1790  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1791  *				     for any changes in the OPP availability
1792  *				     changes
1793  * @dev:	The devfreq user device. (parent of devfreq)
1794  * @devfreq:	The devfreq object.
1795  */
1796 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1797 {
1798 	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
1799 }
1800 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1801 
1802 /**
1803  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1804  *				       notified for any changes in the OPP
1805  *				       availability changes anymore.
1806  * @dev:	The devfreq user device. (parent of devfreq)
1807  * @devfreq:	The devfreq object.
1808  *
1809  * At exit() callback of devfreq_dev_profile, this must be included if
1810  * devfreq_recommended_opp is used.
1811  */
1812 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1813 {
1814 	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
1815 }
1816 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
1817 
/* devres destructor: drop the OPP notifier registered for this device. */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}
1822 
1823 /**
1824  * devm_devfreq_register_opp_notifier() - Resource-managed
1825  *					  devfreq_register_opp_notifier()
1826  * @dev:	The devfreq user device. (parent of devfreq)
1827  * @devfreq:	The devfreq object.
1828  */
1829 int devm_devfreq_register_opp_notifier(struct device *dev,
1830 				       struct devfreq *devfreq)
1831 {
1832 	struct devfreq **ptr;
1833 	int ret;
1834 
1835 	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1836 	if (!ptr)
1837 		return -ENOMEM;
1838 
1839 	ret = devfreq_register_opp_notifier(dev, devfreq);
1840 	if (ret) {
1841 		devres_free(ptr);
1842 		return ret;
1843 	}
1844 
1845 	*ptr = devfreq;
1846 	devres_add(dev, ptr);
1847 
1848 	return 0;
1849 }
1850 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1851 
1852 /**
1853  * devm_devfreq_unregister_opp_notifier() - Resource-managed
1854  *					    devfreq_unregister_opp_notifier()
1855  * @dev:	The devfreq user device. (parent of devfreq)
1856  * @devfreq:	The devfreq object.
1857  */
1858 void devm_devfreq_unregister_opp_notifier(struct device *dev,
1859 					 struct devfreq *devfreq)
1860 {
1861 	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
1862 			       devm_devfreq_dev_match, devfreq));
1863 }
1864 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1865 
1866 /**
1867  * devfreq_register_notifier() - Register a driver with devfreq
1868  * @devfreq:	The devfreq object.
1869  * @nb:		The notifier block to register.
1870  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1871  */
1872 int devfreq_register_notifier(struct devfreq *devfreq,
1873 			      struct notifier_block *nb,
1874 			      unsigned int list)
1875 {
1876 	int ret = 0;
1877 
1878 	if (!devfreq)
1879 		return -EINVAL;
1880 
1881 	switch (list) {
1882 	case DEVFREQ_TRANSITION_NOTIFIER:
1883 		ret = srcu_notifier_chain_register(
1884 				&devfreq->transition_notifier_list, nb);
1885 		break;
1886 	default:
1887 		ret = -EINVAL;
1888 	}
1889 
1890 	return ret;
1891 }
1892 EXPORT_SYMBOL(devfreq_register_notifier);
1893 
1894 /*
1895  * devfreq_unregister_notifier() - Unregister a driver with devfreq
1896  * @devfreq:	The devfreq object.
1897  * @nb:		The notifier block to be unregistered.
1898  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1899  */
1900 int devfreq_unregister_notifier(struct devfreq *devfreq,
1901 				struct notifier_block *nb,
1902 				unsigned int list)
1903 {
1904 	int ret = 0;
1905 
1906 	if (!devfreq)
1907 		return -EINVAL;
1908 
1909 	switch (list) {
1910 	case DEVFREQ_TRANSITION_NOTIFIER:
1911 		ret = srcu_notifier_chain_unregister(
1912 				&devfreq->transition_notifier_list, nb);
1913 		break;
1914 	default:
1915 		ret = -EINVAL;
1916 	}
1917 
1918 	return ret;
1919 }
1920 EXPORT_SYMBOL(devfreq_unregister_notifier);
1921 
/* devres payload recording one devm_devfreq_register_notifier() call. */
struct devfreq_notifier_devres {
	struct devfreq *devfreq;	/* instance the notifier was added to */
	struct notifier_block *nb;	/* the registered notifier block */
	unsigned int list;		/* e.g. DEVFREQ_TRANSITION_NOTIFIER */
};
1927 
/* devres destructor: undo the matching devm_devfreq_register_notifier(). */
static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}
1934 
1935 /**
1936  * devm_devfreq_register_notifier()
1937  *	- Resource-managed devfreq_register_notifier()
1938  * @dev:	The devfreq user device. (parent of devfreq)
1939  * @devfreq:	The devfreq object.
1940  * @nb:		The notifier block to be unregistered.
1941  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1942  */
1943 int devm_devfreq_register_notifier(struct device *dev,
1944 				struct devfreq *devfreq,
1945 				struct notifier_block *nb,
1946 				unsigned int list)
1947 {
1948 	struct devfreq_notifier_devres *ptr;
1949 	int ret;
1950 
1951 	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1952 				GFP_KERNEL);
1953 	if (!ptr)
1954 		return -ENOMEM;
1955 
1956 	ret = devfreq_register_notifier(devfreq, nb, list);
1957 	if (ret) {
1958 		devres_free(ptr);
1959 		return ret;
1960 	}
1961 
1962 	ptr->devfreq = devfreq;
1963 	ptr->nb = nb;
1964 	ptr->list = list;
1965 	devres_add(dev, ptr);
1966 
1967 	return 0;
1968 }
1969 EXPORT_SYMBOL(devm_devfreq_register_notifier);
1970 
1971 /**
1972  * devm_devfreq_unregister_notifier()
1973  *	- Resource-managed devfreq_unregister_notifier()
1974  * @dev:	The devfreq user device. (parent of devfreq)
1975  * @devfreq:	The devfreq object.
1976  * @nb:		The notifier block to be unregistered.
1977  * @list:	DEVFREQ_TRANSITION_NOTIFIER.
1978  */
1979 void devm_devfreq_unregister_notifier(struct device *dev,
1980 				      struct devfreq *devfreq,
1981 				      struct notifier_block *nb,
1982 				      unsigned int list)
1983 {
1984 	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
1985 			       devm_devfreq_dev_match, devfreq));
1986 }
1987 EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
1988