/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;		/* samples since the last decrease evaluation */
	unsigned int requested_freq;	/* frequency this governor last asked for */
};

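/* Map the common per-policy data back to the conservative-specific wrapper. */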
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_FREQUENCY_STEP			(5)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
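
/*
 * The DEF_* values above are only defaults: cs_init() copies them into
 * dbs_data/cs_dbs_tuners, and each one can later be overridden through the
 * corresponding sysfs tunable defined further down in this file.
 */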

static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}
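
/*
 * For example, with the default freq_step of 5 and a hypothetical
 * policy->max of 2000000 kHz, get_freq_target() returns
 * (5 * 2000000) / 100 = 100000 kHz, so requested_freq moves up or down
 * in 100 MHz increments on such a system.
 */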

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default); if it is, we try to increase the frequency. Every sampling_rate *
 * sampling_down_factor, we check if the current idle time is more than 80%
 * (default); if it is, we try to decrease the frequency.
 *
 * Frequency updates happen in steps of 5% (default) of the maximum frequency,
 * in both directions.
 */
static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	/*
	 * Break out if we 'cannot' change the frequency: the user might
	 * have set freq_step to zero to hold the current speed.
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			goto out;

		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		goto out;
	}

	/*
	 * If sampling_down_factor is active, only evaluate a frequency
	 * decrease every sampling_down_factor-th sample (e.g. with a factor
	 * of 10, nine out of ten samples skip the check below).
	 */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;
	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		unsigned int freq_target;
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			goto out;

		freq_target = get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq > freq_target)
			dbs_info->requested_freq -= freq_target;
		else
			dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_L);
	}

 out:
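	/*
	 * The value returned here is used by the common governor code as the
	 * time, in microseconds, to wait before the next sample.
	 */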
	return dbs_data->sampling_rate;
}

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

/************************** sysfs interface ************************/
static struct dbs_governor cs_dbs_gov;

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_ro(min_sampling_rate);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};
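
/*
 * The attributes above appear in sysfs while this governor is in use,
 * typically under /sys/devices/system/cpu/cpufreq/conservative/ (or under
 * each policy's cpufreq directory when the driver requests per-policy
 * governors).  For example, "echo 10 > .../freq_step" doubles the default
 * ramp step, while "echo 0 > .../freq_step" holds the frequency at whatever
 * the governor last requested.
 */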

/************************** sysfs end ************************/

static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int cs_init(struct dbs_data *dbs_data, bool notify)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);

	if (notify)
		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}

static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
	if (notify)
		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

	kfree(dbs_data->tuners);
}

static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	/* Start stepping from wherever the policy is currently running. */
	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}

static struct dbs_governor cs_dbs_gov = {
	.gov = {
		.name = "conservative",
		.governor = cpufreq_governor_dbs,
		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
	},
	.kobj_type = { .default_attrs = cs_attributes },
	.gov_dbs_timer = cs_dbs_timer,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_dbs_gov.gov)

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
	struct cs_policy_dbs_info *dbs_info;

	if (!policy)
		return 0;

	/* policy isn't governed by conservative governor */
	if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
		return 0;

	dbs_info = to_dbs_info(policy->governor_data);
	/*
	 * We only care if our internally tracked freq moves outside the 'valid'
	 * range of frequencies available to us; otherwise we do not change it.
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);