/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

/* --------------------------------------------------------------------------
                                 Limit Interface
   -------------------------------------------------------------------------- */
static int acpi_processor_apply_limit(struct acpi_processor *pr)
{
	int result = 0;
	u16 px = 0;
	u16 tx = 0;


	if (!pr)
		return -EINVAL;

	if (!pr->flags.limit)
		return -ENODEV;

	if (pr->flags.throttling) {
		if (pr->limit.user.tx > tx)
			tx = pr->limit.user.tx;
		if (pr->limit.thermal.tx > tx)
			tx = pr->limit.thermal.tx;

		result = acpi_processor_set_throttling(pr, tx);
		if (result)
			goto end;
	}

	pr->limit.state.px = px;
	pr->limit.state.tx = tx;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
			  pr->limit.state.px, pr->limit.state.tx));

      end:
	if (result)
		printk(KERN_ERR PREFIX "Unable to set limit\n");

	return result;
}

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
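/*
 * Rough illustration of the "cubic" claim above (added for orientation,
 * with approximate numbers): dynamic CPU power scales roughly with
 * C * V^2 * f, and on DVFS-capable parts the voltage can be lowered
 * together with the frequency, so dynamic power falls roughly with the
 * cube of the frequency rather than linearly.  Running at 80% of the
 * maximum frequency can therefore cut dynamic power to about
 * 0.8^3 ~= 51%, whereas clock throttling (T-states) at 80% duty cycle
 * only gets you to about 80%.  That is why cpufreq is preferred here and
 * throttling is used only as a last resort.
 */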

static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
static unsigned int acpi_thermal_cpufreq_is_init = 0;

static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;
	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return -ENODEV;

	if (cpufreq_thermal_reduction_pctg[cpu] < 60) {
		cpufreq_thermal_reduction_pctg[cpu] += 20;
		cpufreq_update_policy(cpu);
		return 0;
	}

	return -ERANGE;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return -ENODEV;

	if (cpufreq_thermal_reduction_pctg[cpu] > 20)
		cpufreq_thermal_reduction_pctg[cpu] -= 20;
	else
		cpufreq_thermal_reduction_pctg[cpu] = 0;
	cpufreq_update_policy(cpu);
	/* We reached max freq again and can leave passive mode */
	return !cpufreq_thermal_reduction_pctg[cpu];
}

static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

	max_freq =
	    (policy->cpuinfo.max_freq *
	     (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

      out:
	return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};

void acpi_thermal_cpufreq_init(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		cpufreq_thermal_reduction_pctg[i] = 0;

	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
				      CPUFREQ_POLICY_NOTIFIER);
	if (!i)
		acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
	if (acpi_thermal_cpufreq_is_init)
		cpufreq_unregister_notifier
		    (&acpi_thermal_cpufreq_notifier_block,
		     CPUFREQ_POLICY_NOTIFIER);

	acpi_thermal_cpufreq_is_init = 0;
}

#else				/* ! CONFIG_CPU_FREQ */

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
	return -ENODEV;
}
static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
	return -ENODEV;
}

#endif

int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
{
	int result = 0;
	struct acpi_processor *pr = NULL;
	struct acpi_device *device = NULL;
	int tx = 0, max_tx_px = 0;


	if ((type < ACPI_PROCESSOR_LIMIT_NONE)
	    || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
		return -EINVAL;

	result = acpi_bus_get_device(handle, &device);
	if (result)
		return result;

	pr = acpi_driver_data(device);
	if (!pr)
		return -ENODEV;

	/* Thermal limits are always relative to the current Px/Tx state. */
	if (pr->flags.throttling)
		pr->limit.thermal.tx = pr->throttling.state;

	/*
	 * Our default policy is to only use throttling at the lowest
	 * performance state.
	 */
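	/*
	 * Descriptive walkthrough of the switch below: on LIMIT_INCREMENT
	 * the cpufreq reduction is stepped up first (20% at a time, capped
	 * at 60%); only when acpi_thermal_cpufreq_increase() reports
	 * -ERANGE (no further P-state headroom) is the throttling (T)
	 * state advanced.  On LIMIT_DECREMENT the order is reversed:
	 * throttling is released first, and only once tx has reached 0 is
	 * the cpufreq reduction stepped back toward 0%.  LIMIT_NONE keeps
	 * decreasing until the reduction is gone and clears tx outright.
	 */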

	tx = pr->limit.thermal.tx;

	switch (type) {

	case ACPI_PROCESSOR_LIMIT_NONE:
		do {
			result = acpi_thermal_cpufreq_decrease(pr->id);
		} while (!result);
		tx = 0;
		break;

	case ACPI_PROCESSOR_LIMIT_INCREMENT:
		/* if going up: P-states first, T-states later */

		result = acpi_thermal_cpufreq_increase(pr->id);
		if (!result)
			goto end;
		else if (result == -ERANGE)
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "At maximum performance state\n"));

		if (pr->flags.throttling) {
			if (tx == (pr->throttling.state_count - 1))
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
						  "At maximum throttling state\n"));
			else
				tx++;
		}
		break;

	case ACPI_PROCESSOR_LIMIT_DECREMENT:
		/* if going down: T-states first, P-states later */

		if (pr->flags.throttling) {
			if (tx == 0) {
				max_tx_px = 1;
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
						  "At minimum throttling state\n"));
			} else {
				tx--;
				goto end;
			}
		}

		result = acpi_thermal_cpufreq_decrease(pr->id);
		if (result) {
			/*
			 * We only could get -ERANGE, 1 or 0.
			 * In the first two cases we reached max freq again.
			 */
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "At minimum performance state\n"));
			max_tx_px = 1;
		} else
			max_tx_px = 0;

		break;
	}

      end:
	if (pr->flags.throttling) {
		pr->limit.thermal.px = 0;
		pr->limit.thermal.tx = tx;

		result = acpi_processor_apply_limit(pr);
		if (result)
			printk(KERN_ERR PREFIX "Unable to set thermal limit\n");

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
				  pr->limit.thermal.px, pr->limit.thermal.tx));
	} else
		result = 0;
	if (max_tx_px)
		return 1;
	else
		return result;
}

int acpi_processor_get_limit_info(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (pr->flags.throttling)
		pr->flags.limit = 1;

	return 0;
}

/* /proc interface */

static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = (struct acpi_processor *)seq->private;


	if (!pr)
		goto end;

	if (!pr->flags.limit) {
		seq_puts(seq, "<not supported>\n");
		goto end;
	}

	seq_printf(seq, "active limit: P%d:T%d\n"
		   "user limit: P%d:T%d\n"
		   "thermal limit: P%d:T%d\n",
		   pr->limit.state.px, pr->limit.state.tx,
		   pr->limit.user.px, pr->limit.user.tx,
		   pr->limit.thermal.px, pr->limit.thermal.tx);

      end:
	return 0;
}

static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_limit_seq_show,
			   PDE(inode)->data);
}
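/*
 * Illustrative usage sketch (the exact /proc path depends on how the
 * processor driver registers this file; on kernels of this vintage it is
 * typically /proc/acpi/processor/<CPUn>/limit):
 *
 *   # cat /proc/acpi/processor/CPU0/limit
 *   active limit: P0:T0
 *   user limit: P0:T0
 *   thermal limit: P0:T0
 *
 *   # echo "0:2" > /proc/acpi/processor/CPU0/limit
 *
 * The write handler below parses "<px>:<tx>"; if throttling is supported
 * it records <tx> as the user throttling limit and re-applies the
 * combined (user + thermal) limit via acpi_processor_apply_limit().
 */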
"Invalid tx\n"); 375 return -EINVAL; 376 } 377 pr->limit.user.tx = tx; 378 } 379 380 result = acpi_processor_apply_limit(pr); 381 382 return count; 383 } 384 385 struct file_operations acpi_processor_limit_fops = { 386 .open = acpi_processor_limit_open_fs, 387 .read = seq_read, 388 .write = acpi_processor_write_limit, 389 .llseek = seq_lseek, 390 .release = single_release, 391 }; 392