/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysdev.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

/* --------------------------------------------------------------------------
                                 Limit Interface
   -------------------------------------------------------------------------- */
static int acpi_processor_apply_limit(struct acpi_processor *pr)
{
        int result = 0;
        u16 px = 0;
        u16 tx = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.limit)
                return -ENODEV;

        if (pr->flags.throttling) {
                if (pr->limit.user.tx > tx)
                        tx = pr->limit.user.tx;
                if (pr->limit.thermal.tx > tx)
                        tx = pr->limit.thermal.tx;

                result = acpi_processor_set_throttling(pr, tx);
                if (result)
                        goto end;
        }

        pr->limit.state.px = px;
        pr->limit.state.tx = tx;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
                          pr->limit.state.px, pr->limit.state.tx));

      end:
        if (result)
                printk(KERN_ERR PREFIX "Unable to set limit\n");

        return result;
}

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
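/*
 * Illustrative arithmetic for the comment above (a rough sketch only; the
 * exact voltage/frequency relation is platform dependent and is an
 * assumption here, not something this driver reads from firmware):
 * dynamic CPU power scales roughly with C * V^2 * f, and voltage tends to
 * scale with frequency, so power falls roughly with f^3.  Running at 80%
 * of the maximum frequency then costs only about half the power
 * (0.8^3 ~= 0.51), whereas T-state throttling to an 80% duty cycle still
 * burns about 80%.  With the 20%-per-step cap applied by
 * acpi_thermal_cpufreq_notifier() below, the reduction steps translate to
 * approximately:
 *
 *      step 0 -> 100% of cpuinfo.max_freq
 *      step 1 ->  80%
 *      step 2 ->  60%
 *      step 3 ->  40%
 */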

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
static unsigned int acpi_thermal_cpufreq_is_init = 0;

static int cpu_has_cpufreq(unsigned int cpu)
{
        struct cpufreq_policy policy;
        if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
                return 0;
        return 1;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (cpufreq_thermal_reduction_pctg[cpu] <
            CPUFREQ_THERMAL_MAX_STEP) {
                cpufreq_thermal_reduction_pctg[cpu]++;
                cpufreq_update_policy(cpu);
                return 0;
        }

        return -ERANGE;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (cpufreq_thermal_reduction_pctg[cpu] >
            (CPUFREQ_THERMAL_MIN_STEP + 1))
                cpufreq_thermal_reduction_pctg[cpu]--;
        else
                cpufreq_thermal_reduction_pctg[cpu] = 0;
        cpufreq_update_policy(cpu);
        /* We reached max freq again and can leave passive mode */
        return !cpufreq_thermal_reduction_pctg[cpu];
}

static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
                                         unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST)
                goto out;

        max_freq =
            (policy->cpuinfo.max_freq *
             (100 - cpufreq_thermal_reduction_pctg[policy->cpu] * 20)) / 100;

        cpufreq_verify_within_limits(policy, 0, max_freq);

      out:
        return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
        .notifier_call = acpi_thermal_cpufreq_notifier,
};

static int cpufreq_get_max_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return cpufreq_thermal_reduction_pctg[cpu];
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        cpufreq_thermal_reduction_pctg[cpu] = state;
        cpufreq_update_policy(cpu);
        return 0;
}

void acpi_thermal_cpufreq_init(void)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                cpufreq_thermal_reduction_pctg[i] = 0;

        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
                                      CPUFREQ_POLICY_NOTIFIER);
        if (!i)
                acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
        if (acpi_thermal_cpufreq_is_init)
                cpufreq_unregister_notifier
                    (&acpi_thermal_cpufreq_notifier_block,
                     CPUFREQ_POLICY_NOTIFIER);

        acpi_thermal_cpufreq_is_init = 0;
}

#else                           /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        return 0;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        return -ENODEV;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        return -ENODEV;
}

#endif

int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
{
        int result = 0;
        struct acpi_processor *pr = NULL;
        struct acpi_device *device = NULL;
        int tx = 0, max_tx_px = 0;

        if ((type < ACPI_PROCESSOR_LIMIT_NONE)
            || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
                return -EINVAL;

        result = acpi_bus_get_device(handle, &device);
        if (result)
                return result;

        pr = acpi_driver_data(device);
        if (!pr)
                return -ENODEV;

        /* Thermal limits are always relative to the current Px/Tx state. */
        if (pr->flags.throttling)
                pr->limit.thermal.tx = pr->throttling.state;

        /*
         * Our default policy is to only use throttling at the lowest
         * performance state.
         */

        tx = pr->limit.thermal.tx;

        switch (type) {

        case ACPI_PROCESSOR_LIMIT_NONE:
                do {
                        result = acpi_thermal_cpufreq_decrease(pr->id);
                } while (!result);
                tx = 0;
                break;

        case ACPI_PROCESSOR_LIMIT_INCREMENT:
                /* if going up: P-states first, T-states later */

                result = acpi_thermal_cpufreq_increase(pr->id);
                if (!result)
                        goto end;
                else if (result == -ERANGE)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At maximum performance state\n"));

                if (pr->flags.throttling) {
                        if (tx == (pr->throttling.state_count - 1))
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At maximum throttling state\n"));
                        else
                                tx++;
                }
                break;

        case ACPI_PROCESSOR_LIMIT_DECREMENT:
                /* if going down: T-states first, P-states later */

                if (pr->flags.throttling) {
                        if (tx == 0) {
                                max_tx_px = 1;
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At minimum throttling state\n"));
                        } else {
                                tx--;
                                goto end;
                        }
                }

                result = acpi_thermal_cpufreq_decrease(pr->id);
                if (result) {
                        /*
                         * We can only get -ENODEV, 1 or 0 here.
                         * In the first two cases we reached max freq again.
                         */
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At minimum performance state\n"));
                        max_tx_px = 1;
                } else
                        max_tx_px = 0;

                break;
        }

      end:
        if (pr->flags.throttling) {
                pr->limit.thermal.px = 0;
                pr->limit.thermal.tx = tx;

                result = acpi_processor_apply_limit(pr);
                if (result)
                        printk(KERN_ERR PREFIX "Unable to set thermal limit\n");

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
                                  pr->limit.thermal.px, pr->limit.thermal.tx));
        } else
                result = 0;
        if (max_tx_px)
                return 1;
        else
                return result;
}

int acpi_processor_get_limit_info(struct acpi_processor *pr)
{
        if (!pr)
                return -EINVAL;

        if (pr->flags.throttling)
                pr->flags.limit = 1;

        return 0;
}
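/*
 * Worked example for the cooling device callbacks below (illustrative
 * only; the actual numbers depend on the platform): with cpufreq
 * available, cpufreq contributes reduction steps 0..3 and throttling
 * contributes a further (state_count - 1) states.  On a CPU with eight
 * T-states, max_state is therefore 3 + 7 = 10; a request for state 5 is
 * split by processor_set_cur_state() into cpufreq step 3 plus throttling
 * state T2, while a request for state 2 clears any throttling and sets
 * cpufreq step 2.
 */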

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
        int max_state = 0;

        /*
         * There are four states according to
         * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3
         */
        max_state += cpufreq_get_max_state(pr->id);
        if (pr->flags.throttling)
                max_state += (pr->throttling.state_count - 1);

        return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev, char *buf)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr = acpi_driver_data(device);

        if (!device || !pr)
                return -EINVAL;

        return sprintf(buf, "%d\n", acpi_processor_max_state(pr));
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr = acpi_driver_data(device);
        int cur_state;

        if (!device || !pr)
                return -EINVAL;

        cur_state = cpufreq_get_cur_state(pr->id);
        if (pr->flags.throttling)
                cur_state += pr->throttling.state;

        return sprintf(buf, "%d\n", cur_state);
}

static int
processor_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr = acpi_driver_data(device);
        int result = 0;
        int max_pstate;

        if (!device || !pr)
                return -EINVAL;

        max_pstate = cpufreq_get_max_state(pr->id);

        if (state > acpi_processor_max_state(pr))
                return -EINVAL;

        if (state <= max_pstate) {
                if (pr->flags.throttling && pr->throttling.state)
                        result = acpi_processor_set_throttling(pr, 0);
                cpufreq_set_cur_state(pr->id, state);
        } else {
                cpufreq_set_cur_state(pr->id, max_pstate);
                result = acpi_processor_set_throttling(pr,
                                                       state - max_pstate);
        }
        return result;
}

struct thermal_cooling_device_ops processor_cooling_ops = {
        .get_max_state = processor_get_max_state,
        .get_cur_state = processor_get_cur_state,
        .set_cur_state = processor_set_cur_state,
};

/* /proc interface */

static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;

        if (!pr)
                goto end;

        if (!pr->flags.limit) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "active limit: P%d:T%d\n"
                   "user limit: P%d:T%d\n"
                   "thermal limit: P%d:T%d\n",
                   pr->limit.state.px, pr->limit.state.tx,
                   pr->limit.user.px, pr->limit.user.tx,
                   pr->limit.thermal.px, pr->limit.thermal.tx);

      end:
        return 0;
}

static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_limit_seq_show,
                           PDE(inode)->data);
}

static ssize_t acpi_processor_write_limit(struct file * file,
                                          const char __user * buffer,
                                          size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = file->private_data;
        struct acpi_processor *pr = m->private;
        char limit_string[25] = { '\0' };
        int px = 0;
        int tx = 0;

        if (!pr || (count > sizeof(limit_string) - 1)) {
                return -EINVAL;
        }

        if (copy_from_user(limit_string, buffer, count)) {
                return -EFAULT;
        }

        limit_string[count] = '\0';

        if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
                printk(KERN_ERR PREFIX "Invalid data format\n");
                return -EINVAL;
        }

        if (pr->flags.throttling) {
                if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
                        printk(KERN_ERR PREFIX "Invalid tx\n");
                        return -EINVAL;
                }
                pr->limit.user.tx = tx;
        }

        result = acpi_processor_apply_limit(pr);
        if (result)
                return result;

        return count;
}

struct file_operations acpi_processor_limit_fops = {
        .open = acpi_processor_limit_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_limit,
        .llseek = seq_lseek,
        .release = single_release,
};
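
/*
 * Usage notes (illustrative sketch, not part of this file):
 *
 * - processor_cooling_ops above is handed to the generic thermal layer
 *   by the processor driver core, with something like
 *
 *       thermal_cooling_device_register("Processor", device,
 *                                       &processor_cooling_ops);
 *
 *   which is why cdev->devdata in the callbacks above is the
 *   struct acpi_device.
 *
 * - The per-processor "limit" file backed by acpi_processor_limit_fops
 *   expects input of the form "<px>:<tx>", e.g. "0:2" to request a user
 *   throttling limit of T2.
 */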