/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD	(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
#define DEF_FREQUENCY_STEP		(5)
#define DEF_SAMPLING_DOWN_FACTOR	(1)
#define MAX_SAMPLING_DOWN_FACTOR	(10)

static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}

/*
 * Every sampling_rate we check: if the current idle time is less than 20%
 * (default), we try to increase the frequency. Every sampling_rate *
 * sampling_down_factor we check: if the current idle time is more than 80%
 * (default), we try to decrease the frequency.
 *
 * Frequency updates happen in minimum steps of 5% (default) of the maximum
 * frequency, for both increases and decreases.
 */
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > cs_tuners->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			return;

		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
		return;
	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		unsigned int freq_target;
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		freq_target = get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq > freq_target)
			dbs_info->requested_freq -= freq_target;
		else
			dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_L);
		return;
	}
}
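/*
 * Worked example (illustrative numbers, not taken from any particular
 * platform): with the default freq_step of 5% and policy->max = 2000000 kHz,
 * get_freq_target() returns 100000 kHz, so each overloaded sample raises
 * requested_freq by 100 MHz and each sufficiently idle sample (once
 * sampling_down_factor samples have elapsed) lowers it by the same amount;
 * __cpufreq_driver_target() then maps requested_freq onto a frequency the
 * driver actually supports.
 */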
static void cs_dbs_timer(struct work_struct *work)
{
	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
			struct cs_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
		modify_all = false;
	else
		dbs_check_cpu(dbs_data, cpu);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cs_cpu_dbs_info_s *dbs_info =
					&per_cpu(cs_cpu_dbs_info, freq->cpu);
	struct cpufreq_policy *policy;

	if (!dbs_info->enable)
		return 0;

	policy = dbs_info->cdbs.cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the
	 * 'valid' range of frequencies available to us; otherwise we do not
	 * change it
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	cs_tuners->sampling_down_factor = input;
	return count;
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	cs_tuners->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= cs_tuners->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}
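/*
 * Example (illustrative values): with the system-wide sysfs layout these
 * tunables typically appear under
 * /sys/devices/system/cpu/cpufreq/conservative/, e.g.
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/conservative/up_threshold
 *   echo 30 > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
 *
 * Note the ordering constraint enforced above: up_threshold must stay
 * strictly greater than down_threshold, so a pair of writes can fail if
 * applied in an order that momentarily violates it.
 */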
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners->ignore_nice_load) /* nothing to do */
		return count;

	cs_tuners->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
					&dbs_info->cdbs.prev_cpu_wall, 0);
		if (cs_tuners->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&down_threshold_gov_sys.attr,
	&ignore_nice_load_gov_sys.attr,
	&freq_step_gov_sys.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&down_threshold_gov_pol.attr,
	&ignore_nice_load_gov_pol.attr,
	&freq_step_gov_pol.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "conservative",
};

/************************** sysfs end ************************/

static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->freq_step = DEF_FREQUENCY_STEP;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);
	mutex_init(&dbs_data->mutex);
	return 0;
}

static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}
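/*
 * For reference (illustrative, the values depend on the kernel config):
 * with HZ=1000, jiffies_to_usecs(10) is 10000 us, so assuming
 * MIN_SAMPLING_RATE_RATIO is 2 (its value in cpufreq_governor.h at the
 * time of writing), cs_init() sets min_sampling_rate to 20000 us, i.e.
 * load is sampled at most once every 20 ms.
 */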
define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

static struct cs_ops cs_ops = {
	.notifier_block = &cs_cpufreq_notifier_block,
};

static struct common_dbs_data cs_dbs_cdata = {
	.governor = GOV_CONSERVATIVE,
	.attr_group_gov_sys = &cs_attr_group_gov_sys,
	.attr_group_gov_pol = &cs_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = cs_dbs_timer,
	.gov_check_cpu = cs_check_cpu,
	.gov_ops = &cs_ops,
	.init = cs_init,
	.exit = cs_exit,
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cs_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
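/*
 * Usage note (illustrative; exact paths depend on the running kernel): once
 * registered, the governor can be selected per policy via sysfs, e.g.
 *
 *   echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * Building with CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE=y instead registers
 * it early via fs_initcall() so it can serve as the default governor at boot.
 */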