/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

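/*
 * Per-policy governor state: down_skip throttles how often a frequency
 * decrease is considered (see sampling_down_factor), and requested_freq
 * tracks the frequency the governor last asked the driver for.
 */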
struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};

static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}

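/* Tunables specific to the conservative governor, exposed via sysfs. */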
struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_FREQUENCY_STEP			(5)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)

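/* Size of one frequency step: freq_step percent of policy->max, never zero. */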
static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default); if it is, we try to increase the frequency. Every sampling_rate *
 * sampling_down_factor, we check if the current idle time is more than 80%
 * (default); if it is, we try to decrease the frequency.
 *
 * Both increases and decreases happen in steps of freq_step (5% of the
 * maximum frequency by default), so the governor ramps the frequency
 * gradually instead of jumping straight to the maximum.
 */
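/*
 * For illustration: with the default freq_step of 5% and a hypothetical
 * policy->max of 2000000 kHz, get_freq_target() returns 100000 kHz, so each
 * evaluation moves requested_freq by at most one such step.
 */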
static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	/*
	 * If freq_step is zero the user has effectively disabled frequency
	 * changes, so there is nothing for the governor to do.
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			goto out;

		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		goto out;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;
	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		unsigned int freq_target;
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			goto out;

		freq_target = get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq > freq_target)
			dbs_info->requested_freq -= freq_target;
		else
			dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_L);
	}

 out:
	return dbs_data->sampling_rate;
}

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

/************************** sysfs interface ************************/
static struct dbs_governor cs_dbs_gov;

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_ro(min_sampling_rate);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

/************************** sysfs end ************************/

static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

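/*
 * Allocate the conservative tunables and set their defaults; when @notify is
 * set, also register the cpufreq transition notifier.
 */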
static int cs_init(struct dbs_data *dbs_data, bool notify)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);

	if (notify)
		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}

static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
	if (notify)
		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

	kfree(dbs_data->tuners);
}

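/* Reset the per-policy state when the governor starts managing a policy. */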
static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}

static struct dbs_governor cs_dbs_gov = {
	.gov = {
		.name = "conservative",
		.governor = cpufreq_governor_dbs,
		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
	},
	.kobj_type = { .default_attrs = cs_attributes },
	.gov_dbs_timer = cs_dbs_timer,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_dbs_gov.gov)

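/*
 * Transition notifier: if the internally tracked requested_freq has ended up
 * outside the policy's current [min, max] range, resynchronise it with the
 * frequency that was just set.
 */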
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
	struct cs_policy_dbs_info *dbs_info;

	if (!policy)
		return 0;

	/* policy isn't governed by conservative governor */
	if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
		return 0;

	dbs_info = to_dbs_info(policy->governor_data);
	/*
	 * We only care if our internally tracked freq has moved outside the
	 * 'valid' range of frequencies available to us; otherwise we do not
	 * change it.
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);