/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT 0x01000000
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long tpc = 0;

        if (!pr)
                return -EINVAL;
        status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
                }
                return -ENODEV;
        }
        pr->throttling_platform_limit = (int)tpc;
        return 0;
}

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
        return acpi_processor_get_platform_limit(pr);
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *ptc = NULL;
        union acpi_object obj = { 0 };

        status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
                }
                return -ENODEV;
        }

        ptc = (union acpi_object *)buffer.pointer;
        if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
            || (ptc->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data\n");
                result = -EFAULT;
                goto end;
        }
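
        /*
         * A valid _PTC object is a two-element package: element 0 describes
         * the throttling control register and element 1 the throttling
         * status register, each as a buffer (nominally a generic register
         * descriptor) copied into a struct acpi_ptc_register below.
         */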

        /*
         * control_register
         */

        obj = ptc->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX
                       "Invalid _PTC data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->throttling.control_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        /*
         * status_register
         */

        obj = ptc->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->throttling.status_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

      end:
        kfree(buffer.pointer);

        return result;
}

/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tss = NULL;
        int i;

        status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
                }
                return -ENODEV;
        }

        tss = buffer.pointer;
        if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _TSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          tss->package.count));

        pr->throttling.state_count = tss->package.count;
        pr->throttling.states_tss =
            kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
                    GFP_KERNEL);
        if (!pr->throttling.states_tss) {
                result = -ENOMEM;
                goto end;
        }
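
        /*
         * Each _TSS entry is extracted with the "NNNNN" format below, i.e.
         * a package of five integers per T-state (nominally the frequency
         * percentage, power, transition latency, control and status values)
         * copied into a struct acpi_processor_tx_tss.
         */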

        for (i = 0; i < pr->throttling.state_count; i++) {

                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);

                state.length = sizeof(struct acpi_processor_tx_tss);
                state.pointer = tx;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(tss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }

                if (!tx->freqpercentage) {
                        printk(KERN_ERR PREFIX
                               "Invalid _TSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }
        }

      end:
        kfree(buffer.pointer);

        return result;
}

/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tsd = NULL;
        struct acpi_tsd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
                }
                return -ENODEV;
        }

        tsd = buffer.pointer;
        if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (tsd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->throttling.domain_info);

        state.length = sizeof(struct acpi_tsd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(tsd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

      end:
        kfree(buffer.pointer);
        return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
        int state = 0;
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= pr->throttling.duty_offset;

        local_irq_disable();

        value = inl(pr->throttling.address);

        /*
         * Compute the current throttling state when throttling is enabled
         * (bit 4 is on).
         */
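        /*
         * Example (illustrative numbers): with duty_width = 3 the duty value
         * occupies three bits, state_count = 8, and a duty_value of 6 (clock
         * running for 6 of 8 intervals) maps to state T2, i.e. 8 - 6.
         */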
        if (value & 0x10) {
                duty_value = value & duty_mask;
                duty_value >>= pr->throttling.duty_offset;

                if (duty_value)
                        state = pr->throttling.state_count - duty_value;
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state is T%d (%d%% throttling applied)\n",
                          state, pr->throttling.states[state].performance));

        return 0;
}

static int acpi_read_throttling_status(struct acpi_processor_throttling
                                       *throttling)
{
        int value = -1;

        switch (throttling->status_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                acpi_os_read_port((acpi_io_address)
                                  throttling->status_register.address, &value,
                                  (u32) throttling->status_register.bit_width *
                                  8);
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                printk(KERN_ERR PREFIX
                       "HARDWARE addr space, NOT supported yet\n");
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->status_register.space_id));
        }
        return value;
}

static int acpi_write_throttling_state(struct acpi_processor_throttling
                                       *throttling, int value)
{
        int ret = -1;

        switch (throttling->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                acpi_os_write_port((acpi_io_address)
                                   throttling->control_register.address, value,
                                   (u32) throttling->control_register.bit_width *
                                   8);
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                printk(KERN_ERR PREFIX
                       "HARDWARE addr space, NOT supported yet\n");
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->control_register.space_id));
        }
        return ret;
}

static int acpi_get_throttling_state(struct acpi_processor *pr, int value)
{
        int i;

        for (i = 0; i < pr->throttling.state_count; i++) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);
                if (tx->control == value)
                        break;
        }
        /* Not found: the loop ran to completion, so i == state_count. */
        if (i >= pr->throttling.state_count)
                i = -1;
        return i;
}

static int acpi_get_throttling_value(struct acpi_processor *pr, int state)
{
        int value = -1;

        if (state >= 0 && state <= pr->throttling.state_count) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[state]);
                value = tx->control;
        }
        return value;
}
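
/*
 * _PTC/_TSS based path: read the current value of the throttling status
 * register and map it back to a T-state by matching it against the
 * per-state control values obtained from _TSS.
 */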
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
        int state = 0;
        /* Signed: acpi_read_throttling_status() returns -1 on failure. */
        int value = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;
        local_irq_disable();
        value = acpi_read_throttling_status(&pr->throttling);
        if (value >= 0) {
                state = acpi_get_throttling_state(pr, value);
                pr->throttling.state = state;
        }
        local_irq_enable();

        return 0;
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
        return pr->throttling.acpi_processor_get_throttling(pr);
}

static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
                                              int state)
{
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;

        /*
         * Calculate the duty_value and duty_mask.
         */
        if (state) {
                duty_value = pr->throttling.state_count - state;

                duty_value <<= pr->throttling.duty_offset;

                /* Used to clear all duty_value bits */
                duty_mask = pr->throttling.state_count - 1;

                duty_mask <<= acpi_gbl_FADT.duty_offset;
                duty_mask = ~duty_mask;
        }
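
        /*
         * Example (illustrative numbers): for state T2 with state_count = 8
         * and duty_offset = 1, duty_value = (8 - 2) << 1 = 12 and
         * duty_mask = ~((8 - 1) << 1), so the old duty field is cleared
         * below and replaced with the new value before re-enabling bit 4.
         */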

        local_irq_disable();

        /*
         * Disable throttling by writing a 0 to bit 4.  Note that throttling
         * must be turned off before the duty_value can be changed.
         */
        value = inl(pr->throttling.address);
        if (value & 0x10) {
                value &= 0xFFFFFFEF;
                outl(value, pr->throttling.address);
        }

        /*
         * Write the new duty_value and then enable throttling.  Note
         * that a state value of 0 leaves throttling disabled.
         */
        if (state) {
                value &= duty_mask;
                value |= duty_value;
                outl(value, pr->throttling.address);

                value |= 0x00000010;
                outl(value, pr->throttling.address);
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state set to T%d (%d%%)\n", state,
                          (pr->throttling.states[state].performance ?
                           pr->throttling.states[state].performance / 10 : 0)));

        return 0;
}

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
                                             int state)
{
        /* Signed: acpi_get_throttling_value() returns -1 on failure. */
        int value = 0;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;

        local_irq_disable();

        value = acpi_get_throttling_value(pr, state);
        if (value >= 0) {
                acpi_write_throttling_state(&pr->throttling, value);
                pr->throttling.state = state;
        }
        local_irq_enable();

        return 0;
}

int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
        return pr->throttling.acpi_processor_set_throttling(pr, state);
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
        int result = 0;
        int step = 0;
        int i = 0;

        if (!pr)
                return -EINVAL;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
                          pr->throttling.address,
                          pr->throttling.duty_offset,
                          pr->throttling.duty_width));

        /*
         * Evaluate _PTC, _TSS and _TPC.
         * They must all be present or none of them can be used.
         */
        if (acpi_processor_get_throttling_control(pr) ||
            acpi_processor_get_throttling_states(pr) ||
            acpi_processor_get_platform_limit(pr)) {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_fadt;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_fadt;
        } else {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_ptc;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_ptc;
        }

        acpi_processor_get_tsd(pr);

        if (!pr->throttling.address) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
                return 0;
        } else if (!pr->throttling.duty_width) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
                return 0;
        }
        /* TBD: Support duty_cycle values that span bit 4. */
        else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
                printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
                return 0;
        }

        /*
         * PIIX4 Errata: We don't support throttling on the original PIIX4.
         * This shouldn't be an issue as few (if any) mobile systems ever
         * used this part.
         */
        if (errata.piix4.throttle) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Throttling not supported on PIIX4 A- or B-step\n"));
                return 0;
        }

        pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

        /*
         * Compute state values.  Note that throttling displays a linear power/
         * performance relationship (at 50% performance the CPU will consume
         * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
         */
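        /*
         * Example (illustrative numbers): with duty_width = 3, state_count = 8
         * and step = 1000 / 8 = 125, state T4 is recorded as 4 * 125 = 500,
         * i.e. 50.0% in tenths of a percent.
         */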

        step = (1000 / pr->throttling.state_count);

        for (i = 0; i < pr->throttling.state_count; i++) {
                pr->throttling.states[i].performance = step * i;
                pr->throttling.states[i].power = step * i;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          pr->throttling.state_count));

        pr->flags.throttling = 1;

        /*
         * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
         * thermal) decide to lower performance if it so chooses, but for now
         * we'll crank up the speed.
         */

        result = acpi_processor_get_throttling(pr);
        if (result)
                goto end;

        if (pr->throttling.state) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Disabling throttling (was T%d)\n",
                                  pr->throttling.state));
                result = acpi_processor_set_throttling(pr, 0);
                if (result)
                        goto end;
        }

      end:
        if (result)
                pr->flags.throttling = 0;

        return result;
}

/* proc interface */

static int acpi_processor_throttling_seq_show(struct seq_file *seq,
                                              void *offset)
{
        struct acpi_processor *pr = seq->private;
        int i = 0;
        int result = 0;

        if (!pr)
                goto end;

        if (!(pr->throttling.state_count > 0)) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        result = acpi_processor_get_throttling(pr);

        if (result) {
                seq_puts(seq,
                         "Could not determine current throttling state.\n");
                goto end;
        }

        seq_printf(seq, "state count: %d\n"
                   "active state: T%d\n"
                   "state available: T%d to T%d\n",
                   pr->throttling.state_count, pr->throttling.state,
                   pr->throttling_platform_limit,
                   pr->throttling.state_count - 1);

        seq_puts(seq, "states:\n");
        if (pr->throttling.acpi_processor_get_throttling ==
            acpi_processor_get_throttling_fadt) {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, " %cT%d: %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (pr->throttling.states[i].performance ?
                                    pr->throttling.states[i].performance / 10 : 0));
        } else {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, " %cT%d: %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (int)pr->throttling.states_tss[i].
                                   freqpercentage);
        }

      end:
        return 0;
}

static int acpi_processor_throttling_open_fs(struct inode *inode,
                                             struct file *file)
{
        return single_open(file, acpi_processor_throttling_seq_show,
                           PDE(inode)->data);
}

static ssize_t acpi_processor_write_throttling(struct file *file,
                                               const char __user * buffer,
                                               size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = file->private_data;
        struct acpi_processor *pr = m->private;
        char state_string[12] = { '\0' };

        if (!pr || (count > sizeof(state_string) - 1))
                return -EINVAL;

        if (copy_from_user(state_string, buffer, count))
                return -EFAULT;

        state_string[count] = '\0';

        result = acpi_processor_set_throttling(pr,
                                               simple_strtoul(state_string,
                                                              NULL, 0));
        if (result)
                return result;

        return count;
}

struct file_operations acpi_processor_throttling_fops = {
        .open = acpi_processor_throttling_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_throttling,
        .llseek = seq_lseek,
        .release = single_release,
};
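
/*
 * Usage note (illustrative; the proc entry itself is registered elsewhere in
 * the processor driver): these file operations typically back an entry such
 * as /proc/acpi/processor/CPU0/throttling, so the current T-state can be read
 * with cat and a new one requested with, e.g., "echo 2 > .../throttling".
 */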