1 /* 2 * processor_throttling.c - Throttling submodule of the ACPI processor driver 3 * 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 * - Added processor hotplug support 9 * 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or (at 15 * your option) any later version. 16 * 17 * This program is distributed in the hope that it will be useful, but 18 * WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 * General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License along 23 * with this program; if not, write to the Free Software Foundation, Inc., 24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 25 * 26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 27 */ 28 29 #include <linux/kernel.h> 30 #include <linux/module.h> 31 #include <linux/init.h> 32 #include <linux/sched.h> 33 #include <linux/cpufreq.h> 34 #include <linux/proc_fs.h> 35 #include <linux/seq_file.h> 36 37 #include <asm/io.h> 38 #include <asm/uaccess.h> 39 40 #include <acpi/acpi_bus.h> 41 #include <acpi/acpi_drivers.h> 42 #include <acpi/processor.h> 43 44 #define PREFIX "ACPI: " 45 46 #define ACPI_PROCESSOR_CLASS "processor" 47 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 48 ACPI_MODULE_NAME("processor_throttling"); 49 50 /* ignore_tpc: 51 * 0 -> acpi processor driver doesn't ignore _TPC values 52 * 1 -> acpi processor driver ignores _TPC values 53 */ 54 static int ignore_tpc; 55 module_param(ignore_tpc, int, 0644); 56 MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support"); 57 58 struct throttling_tstate { 59 unsigned int cpu; /* cpu nr */ 60 int target_state; /* target T-state */ 61 }; 62 63 #define THROTTLING_PRECHANGE (1) 64 #define THROTTLING_POSTCHANGE (2) 65 66 static int acpi_processor_get_throttling(struct acpi_processor *pr); 67 int acpi_processor_set_throttling(struct acpi_processor *pr, 68 int state, bool force); 69 70 static int acpi_processor_update_tsd_coord(void) 71 { 72 int count, count_target; 73 int retval = 0; 74 unsigned int i, j; 75 cpumask_var_t covered_cpus; 76 struct acpi_processor *pr, *match_pr; 77 struct acpi_tsd_package *pdomain, *match_pdomain; 78 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 79 80 if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 81 return -ENOMEM; 82 83 /* 84 * Now that we have _TSD data from all CPUs, lets setup T-state 85 * coordination between all CPUs. 86 */ 87 for_each_possible_cpu(i) { 88 pr = per_cpu(processors, i); 89 if (!pr) 90 continue; 91 92 /* Basic validity check for domain info */ 93 pthrottling = &(pr->throttling); 94 95 /* 96 * If tsd package for one cpu is invalid, the coordination 97 * among all CPUs is thought as invalid. 98 * Maybe it is ugly. 99 */ 100 if (!pthrottling->tsd_valid_flag) { 101 retval = -EINVAL; 102 break; 103 } 104 } 105 if (retval) 106 goto err_ret; 107 108 for_each_possible_cpu(i) { 109 pr = per_cpu(processors, i); 110 if (!pr) 111 continue; 112 113 if (cpumask_test_cpu(i, covered_cpus)) 114 continue; 115 pthrottling = &pr->throttling; 116 117 pdomain = &(pthrottling->domain_info); 118 cpumask_set_cpu(i, pthrottling->shared_cpu_map); 119 cpumask_set_cpu(i, covered_cpus); 120 /* 121 * If the number of processor in the TSD domain is 1, it is 122 * unnecessary to parse the coordination for this CPU. 123 */ 124 if (pdomain->num_processors <= 1) 125 continue; 126 127 /* Validate the Domain info */ 128 count_target = pdomain->num_processors; 129 count = 1; 130 131 for_each_possible_cpu(j) { 132 if (i == j) 133 continue; 134 135 match_pr = per_cpu(processors, j); 136 if (!match_pr) 137 continue; 138 139 match_pthrottling = &(match_pr->throttling); 140 match_pdomain = &(match_pthrottling->domain_info); 141 if (match_pdomain->domain != pdomain->domain) 142 continue; 143 144 /* Here i and j are in the same domain. 145 * If two TSD packages have the same domain, they 146 * should have the same num_porcessors and 147 * coordination type. Otherwise it will be regarded 148 * as illegal. 149 */ 150 if (match_pdomain->num_processors != count_target) { 151 retval = -EINVAL; 152 goto err_ret; 153 } 154 155 if (pdomain->coord_type != match_pdomain->coord_type) { 156 retval = -EINVAL; 157 goto err_ret; 158 } 159 160 cpumask_set_cpu(j, covered_cpus); 161 cpumask_set_cpu(j, pthrottling->shared_cpu_map); 162 count++; 163 } 164 for_each_possible_cpu(j) { 165 if (i == j) 166 continue; 167 168 match_pr = per_cpu(processors, j); 169 if (!match_pr) 170 continue; 171 172 match_pthrottling = &(match_pr->throttling); 173 match_pdomain = &(match_pthrottling->domain_info); 174 if (match_pdomain->domain != pdomain->domain) 175 continue; 176 177 /* 178 * If some CPUS have the same domain, they 179 * will have the same shared_cpu_map. 180 */ 181 cpumask_copy(match_pthrottling->shared_cpu_map, 182 pthrottling->shared_cpu_map); 183 } 184 } 185 186 err_ret: 187 free_cpumask_var(covered_cpus); 188 189 for_each_possible_cpu(i) { 190 pr = per_cpu(processors, i); 191 if (!pr) 192 continue; 193 194 /* 195 * Assume no coordination on any error parsing domain info. 196 * The coordination type will be forced as SW_ALL. 197 */ 198 if (retval) { 199 pthrottling = &(pr->throttling); 200 cpumask_clear(pthrottling->shared_cpu_map); 201 cpumask_set_cpu(i, pthrottling->shared_cpu_map); 202 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 203 } 204 } 205 206 return retval; 207 } 208 209 /* 210 * Update the T-state coordination after the _TSD 211 * data for all cpus is obtained. 212 */ 213 void acpi_processor_throttling_init(void) 214 { 215 if (acpi_processor_update_tsd_coord()) 216 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 217 "Assume no T-state coordination\n")); 218 219 return; 220 } 221 222 static int acpi_processor_throttling_notifier(unsigned long event, void *data) 223 { 224 struct throttling_tstate *p_tstate = data; 225 struct acpi_processor *pr; 226 unsigned int cpu ; 227 int target_state; 228 struct acpi_processor_limit *p_limit; 229 struct acpi_processor_throttling *p_throttling; 230 231 cpu = p_tstate->cpu; 232 pr = per_cpu(processors, cpu); 233 if (!pr) { 234 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n")); 235 return 0; 236 } 237 if (!pr->flags.throttling) { 238 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is " 239 "unsupported on CPU %d\n", cpu)); 240 return 0; 241 } 242 target_state = p_tstate->target_state; 243 p_throttling = &(pr->throttling); 244 switch (event) { 245 case THROTTLING_PRECHANGE: 246 /* 247 * Prechange event is used to choose one proper t-state, 248 * which meets the limits of thermal, user and _TPC. 249 */ 250 p_limit = &pr->limit; 251 if (p_limit->thermal.tx > target_state) 252 target_state = p_limit->thermal.tx; 253 if (p_limit->user.tx > target_state) 254 target_state = p_limit->user.tx; 255 if (pr->throttling_platform_limit > target_state) 256 target_state = pr->throttling_platform_limit; 257 if (target_state >= p_throttling->state_count) { 258 printk(KERN_WARNING 259 "Exceed the limit of T-state \n"); 260 target_state = p_throttling->state_count - 1; 261 } 262 p_tstate->target_state = target_state; 263 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:" 264 "target T-state of CPU %d is T%d\n", 265 cpu, target_state)); 266 break; 267 case THROTTLING_POSTCHANGE: 268 /* 269 * Postchange event is only used to update the 270 * T-state flag of acpi_processor_throttling. 271 */ 272 p_throttling->state = target_state; 273 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:" 274 "CPU %d is switched to T%d\n", 275 cpu, target_state)); 276 break; 277 default: 278 printk(KERN_WARNING 279 "Unsupported Throttling notifier event\n"); 280 break; 281 } 282 283 return 0; 284 } 285 286 /* 287 * _TPC - Throttling Present Capabilities 288 */ 289 static int acpi_processor_get_platform_limit(struct acpi_processor *pr) 290 { 291 acpi_status status = 0; 292 unsigned long long tpc = 0; 293 294 if (!pr) 295 return -EINVAL; 296 297 if (ignore_tpc) 298 goto end; 299 300 status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc); 301 if (ACPI_FAILURE(status)) { 302 if (status != AE_NOT_FOUND) { 303 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC")); 304 } 305 return -ENODEV; 306 } 307 308 end: 309 pr->throttling_platform_limit = (int)tpc; 310 return 0; 311 } 312 313 int acpi_processor_tstate_has_changed(struct acpi_processor *pr) 314 { 315 int result = 0; 316 int throttling_limit; 317 int current_state; 318 struct acpi_processor_limit *limit; 319 int target_state; 320 321 if (ignore_tpc) 322 return 0; 323 324 result = acpi_processor_get_platform_limit(pr); 325 if (result) { 326 /* Throttling Limit is unsupported */ 327 return result; 328 } 329 330 throttling_limit = pr->throttling_platform_limit; 331 if (throttling_limit >= pr->throttling.state_count) { 332 /* Uncorrect Throttling Limit */ 333 return -EINVAL; 334 } 335 336 current_state = pr->throttling.state; 337 if (current_state > throttling_limit) { 338 /* 339 * The current state can meet the requirement of 340 * _TPC limit. But it is reasonable that OSPM changes 341 * t-states from high to low for better performance. 342 * Of course the limit condition of thermal 343 * and user should be considered. 344 */ 345 limit = &pr->limit; 346 target_state = throttling_limit; 347 if (limit->thermal.tx > target_state) 348 target_state = limit->thermal.tx; 349 if (limit->user.tx > target_state) 350 target_state = limit->user.tx; 351 } else if (current_state == throttling_limit) { 352 /* 353 * Unnecessary to change the throttling state 354 */ 355 return 0; 356 } else { 357 /* 358 * If the current state is lower than the limit of _TPC, it 359 * will be forced to switch to the throttling state defined 360 * by throttling_platfor_limit. 361 * Because the previous state meets with the limit condition 362 * of thermal and user, it is unnecessary to check it again. 363 */ 364 target_state = throttling_limit; 365 } 366 return acpi_processor_set_throttling(pr, target_state, false); 367 } 368 369 /* 370 * _PTC - Processor Throttling Control (and status) register location 371 */ 372 static int acpi_processor_get_throttling_control(struct acpi_processor *pr) 373 { 374 int result = 0; 375 acpi_status status = 0; 376 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 377 union acpi_object *ptc = NULL; 378 union acpi_object obj = { 0 }; 379 struct acpi_processor_throttling *throttling; 380 381 status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer); 382 if (ACPI_FAILURE(status)) { 383 if (status != AE_NOT_FOUND) { 384 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC")); 385 } 386 return -ENODEV; 387 } 388 389 ptc = (union acpi_object *)buffer.pointer; 390 if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE) 391 || (ptc->package.count != 2)) { 392 printk(KERN_ERR PREFIX "Invalid _PTC data\n"); 393 result = -EFAULT; 394 goto end; 395 } 396 397 /* 398 * control_register 399 */ 400 401 obj = ptc->package.elements[0]; 402 403 if ((obj.type != ACPI_TYPE_BUFFER) 404 || (obj.buffer.length < sizeof(struct acpi_ptc_register)) 405 || (obj.buffer.pointer == NULL)) { 406 printk(KERN_ERR PREFIX 407 "Invalid _PTC data (control_register)\n"); 408 result = -EFAULT; 409 goto end; 410 } 411 memcpy(&pr->throttling.control_register, obj.buffer.pointer, 412 sizeof(struct acpi_ptc_register)); 413 414 /* 415 * status_register 416 */ 417 418 obj = ptc->package.elements[1]; 419 420 if ((obj.type != ACPI_TYPE_BUFFER) 421 || (obj.buffer.length < sizeof(struct acpi_ptc_register)) 422 || (obj.buffer.pointer == NULL)) { 423 printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n"); 424 result = -EFAULT; 425 goto end; 426 } 427 428 memcpy(&pr->throttling.status_register, obj.buffer.pointer, 429 sizeof(struct acpi_ptc_register)); 430 431 throttling = &pr->throttling; 432 433 if ((throttling->control_register.bit_width + 434 throttling->control_register.bit_offset) > 32) { 435 printk(KERN_ERR PREFIX "Invalid _PTC control register\n"); 436 result = -EFAULT; 437 goto end; 438 } 439 440 if ((throttling->status_register.bit_width + 441 throttling->status_register.bit_offset) > 32) { 442 printk(KERN_ERR PREFIX "Invalid _PTC status register\n"); 443 result = -EFAULT; 444 goto end; 445 } 446 447 end: 448 kfree(buffer.pointer); 449 450 return result; 451 } 452 453 /* 454 * _TSS - Throttling Supported States 455 */ 456 static int acpi_processor_get_throttling_states(struct acpi_processor *pr) 457 { 458 int result = 0; 459 acpi_status status = AE_OK; 460 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 461 struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; 462 struct acpi_buffer state = { 0, NULL }; 463 union acpi_object *tss = NULL; 464 int i; 465 466 status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer); 467 if (ACPI_FAILURE(status)) { 468 if (status != AE_NOT_FOUND) { 469 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS")); 470 } 471 return -ENODEV; 472 } 473 474 tss = buffer.pointer; 475 if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) { 476 printk(KERN_ERR PREFIX "Invalid _TSS data\n"); 477 result = -EFAULT; 478 goto end; 479 } 480 481 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", 482 tss->package.count)); 483 484 pr->throttling.state_count = tss->package.count; 485 pr->throttling.states_tss = 486 kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count, 487 GFP_KERNEL); 488 if (!pr->throttling.states_tss) { 489 result = -ENOMEM; 490 goto end; 491 } 492 493 for (i = 0; i < pr->throttling.state_count; i++) { 494 495 struct acpi_processor_tx_tss *tx = 496 (struct acpi_processor_tx_tss *)&(pr->throttling. 497 states_tss[i]); 498 499 state.length = sizeof(struct acpi_processor_tx_tss); 500 state.pointer = tx; 501 502 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); 503 504 status = acpi_extract_package(&(tss->package.elements[i]), 505 &format, &state); 506 if (ACPI_FAILURE(status)) { 507 ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data")); 508 result = -EFAULT; 509 kfree(pr->throttling.states_tss); 510 goto end; 511 } 512 513 if (!tx->freqpercentage) { 514 printk(KERN_ERR PREFIX 515 "Invalid _TSS data: freq is zero\n"); 516 result = -EFAULT; 517 kfree(pr->throttling.states_tss); 518 goto end; 519 } 520 } 521 522 end: 523 kfree(buffer.pointer); 524 525 return result; 526 } 527 528 /* 529 * _TSD - T-State Dependencies 530 */ 531 static int acpi_processor_get_tsd(struct acpi_processor *pr) 532 { 533 int result = 0; 534 acpi_status status = AE_OK; 535 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 536 struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; 537 struct acpi_buffer state = { 0, NULL }; 538 union acpi_object *tsd = NULL; 539 struct acpi_tsd_package *pdomain; 540 struct acpi_processor_throttling *pthrottling; 541 542 pthrottling = &pr->throttling; 543 pthrottling->tsd_valid_flag = 0; 544 545 status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer); 546 if (ACPI_FAILURE(status)) { 547 if (status != AE_NOT_FOUND) { 548 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD")); 549 } 550 return -ENODEV; 551 } 552 553 tsd = buffer.pointer; 554 if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) { 555 printk(KERN_ERR PREFIX "Invalid _TSD data\n"); 556 result = -EFAULT; 557 goto end; 558 } 559 560 if (tsd->package.count != 1) { 561 printk(KERN_ERR PREFIX "Invalid _TSD data\n"); 562 result = -EFAULT; 563 goto end; 564 } 565 566 pdomain = &(pr->throttling.domain_info); 567 568 state.length = sizeof(struct acpi_tsd_package); 569 state.pointer = pdomain; 570 571 status = acpi_extract_package(&(tsd->package.elements[0]), 572 &format, &state); 573 if (ACPI_FAILURE(status)) { 574 printk(KERN_ERR PREFIX "Invalid _TSD data\n"); 575 result = -EFAULT; 576 goto end; 577 } 578 579 if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) { 580 printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n"); 581 result = -EFAULT; 582 goto end; 583 } 584 585 if (pdomain->revision != ACPI_TSD_REV0_REVISION) { 586 printk(KERN_ERR PREFIX "Unknown _TSD:revision\n"); 587 result = -EFAULT; 588 goto end; 589 } 590 591 pthrottling = &pr->throttling; 592 pthrottling->tsd_valid_flag = 1; 593 pthrottling->shared_type = pdomain->coord_type; 594 cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 595 /* 596 * If the coordination type is not defined in ACPI spec, 597 * the tsd_valid_flag will be clear and coordination type 598 * will be forecd as DOMAIN_COORD_TYPE_SW_ALL. 599 */ 600 if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && 601 pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && 602 pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { 603 pthrottling->tsd_valid_flag = 0; 604 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 605 } 606 607 end: 608 kfree(buffer.pointer); 609 return result; 610 } 611 612 /* -------------------------------------------------------------------------- 613 Throttling Control 614 -------------------------------------------------------------------------- */ 615 static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) 616 { 617 int state = 0; 618 u32 value = 0; 619 u32 duty_mask = 0; 620 u32 duty_value = 0; 621 622 if (!pr) 623 return -EINVAL; 624 625 if (!pr->flags.throttling) 626 return -ENODEV; 627 628 pr->throttling.state = 0; 629 630 duty_mask = pr->throttling.state_count - 1; 631 632 duty_mask <<= pr->throttling.duty_offset; 633 634 local_irq_disable(); 635 636 value = inl(pr->throttling.address); 637 638 /* 639 * Compute the current throttling state when throttling is enabled 640 * (bit 4 is on). 641 */ 642 if (value & 0x10) { 643 duty_value = value & duty_mask; 644 duty_value >>= pr->throttling.duty_offset; 645 646 if (duty_value) 647 state = pr->throttling.state_count - duty_value; 648 } 649 650 pr->throttling.state = state; 651 652 local_irq_enable(); 653 654 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 655 "Throttling state is T%d (%d%% throttling applied)\n", 656 state, pr->throttling.states[state].performance)); 657 658 return 0; 659 } 660 661 #ifdef CONFIG_X86 662 static int acpi_throttling_rdmsr(struct acpi_processor *pr, 663 acpi_integer * value) 664 { 665 struct cpuinfo_x86 *c; 666 u64 msr_high, msr_low; 667 unsigned int cpu; 668 u64 msr = 0; 669 int ret = -1; 670 671 cpu = pr->id; 672 c = &cpu_data(cpu); 673 674 if ((c->x86_vendor != X86_VENDOR_INTEL) || 675 !cpu_has(c, X86_FEATURE_ACPI)) { 676 printk(KERN_ERR PREFIX 677 "HARDWARE addr space,NOT supported yet\n"); 678 } else { 679 msr_low = 0; 680 msr_high = 0; 681 rdmsr_safe(MSR_IA32_THERM_CONTROL, 682 (u32 *)&msr_low , (u32 *) &msr_high); 683 msr = (msr_high << 32) | msr_low; 684 *value = (acpi_integer) msr; 685 ret = 0; 686 } 687 return ret; 688 } 689 690 static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value) 691 { 692 struct cpuinfo_x86 *c; 693 unsigned int cpu; 694 int ret = -1; 695 u64 msr; 696 697 cpu = pr->id; 698 c = &cpu_data(cpu); 699 700 if ((c->x86_vendor != X86_VENDOR_INTEL) || 701 !cpu_has(c, X86_FEATURE_ACPI)) { 702 printk(KERN_ERR PREFIX 703 "HARDWARE addr space,NOT supported yet\n"); 704 } else { 705 msr = value; 706 wrmsr_safe(MSR_IA32_THERM_CONTROL, 707 msr & 0xffffffff, msr >> 32); 708 ret = 0; 709 } 710 return ret; 711 } 712 #else 713 static int acpi_throttling_rdmsr(struct acpi_processor *pr, 714 acpi_integer * value) 715 { 716 printk(KERN_ERR PREFIX 717 "HARDWARE addr space,NOT supported yet\n"); 718 return -1; 719 } 720 721 static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value) 722 { 723 printk(KERN_ERR PREFIX 724 "HARDWARE addr space,NOT supported yet\n"); 725 return -1; 726 } 727 #endif 728 729 static int acpi_read_throttling_status(struct acpi_processor *pr, 730 acpi_integer *value) 731 { 732 u32 bit_width, bit_offset; 733 u64 ptc_value; 734 u64 ptc_mask; 735 struct acpi_processor_throttling *throttling; 736 int ret = -1; 737 738 throttling = &pr->throttling; 739 switch (throttling->status_register.space_id) { 740 case ACPI_ADR_SPACE_SYSTEM_IO: 741 ptc_value = 0; 742 bit_width = throttling->status_register.bit_width; 743 bit_offset = throttling->status_register.bit_offset; 744 745 acpi_os_read_port((acpi_io_address) throttling->status_register. 746 address, (u32 *) &ptc_value, 747 (u32) (bit_width + bit_offset)); 748 ptc_mask = (1 << bit_width) - 1; 749 *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask); 750 ret = 0; 751 break; 752 case ACPI_ADR_SPACE_FIXED_HARDWARE: 753 ret = acpi_throttling_rdmsr(pr, value); 754 break; 755 default: 756 printk(KERN_ERR PREFIX "Unknown addr space %d\n", 757 (u32) (throttling->status_register.space_id)); 758 } 759 return ret; 760 } 761 762 static int acpi_write_throttling_state(struct acpi_processor *pr, 763 acpi_integer value) 764 { 765 u32 bit_width, bit_offset; 766 u64 ptc_value; 767 u64 ptc_mask; 768 struct acpi_processor_throttling *throttling; 769 int ret = -1; 770 771 throttling = &pr->throttling; 772 switch (throttling->control_register.space_id) { 773 case ACPI_ADR_SPACE_SYSTEM_IO: 774 bit_width = throttling->control_register.bit_width; 775 bit_offset = throttling->control_register.bit_offset; 776 ptc_mask = (1 << bit_width) - 1; 777 ptc_value = value & ptc_mask; 778 779 acpi_os_write_port((acpi_io_address) throttling-> 780 control_register.address, 781 (u32) (ptc_value << bit_offset), 782 (u32) (bit_width + bit_offset)); 783 ret = 0; 784 break; 785 case ACPI_ADR_SPACE_FIXED_HARDWARE: 786 ret = acpi_throttling_wrmsr(pr, value); 787 break; 788 default: 789 printk(KERN_ERR PREFIX "Unknown addr space %d\n", 790 (u32) (throttling->control_register.space_id)); 791 } 792 return ret; 793 } 794 795 static int acpi_get_throttling_state(struct acpi_processor *pr, 796 acpi_integer value) 797 { 798 int i; 799 800 for (i = 0; i < pr->throttling.state_count; i++) { 801 struct acpi_processor_tx_tss *tx = 802 (struct acpi_processor_tx_tss *)&(pr->throttling. 803 states_tss[i]); 804 if (tx->control == value) 805 return i; 806 } 807 return -1; 808 } 809 810 static int acpi_get_throttling_value(struct acpi_processor *pr, 811 int state, acpi_integer *value) 812 { 813 int ret = -1; 814 815 if (state >= 0 && state <= pr->throttling.state_count) { 816 struct acpi_processor_tx_tss *tx = 817 (struct acpi_processor_tx_tss *)&(pr->throttling. 818 states_tss[state]); 819 *value = tx->control; 820 ret = 0; 821 } 822 return ret; 823 } 824 825 static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) 826 { 827 int state = 0; 828 int ret; 829 acpi_integer value; 830 831 if (!pr) 832 return -EINVAL; 833 834 if (!pr->flags.throttling) 835 return -ENODEV; 836 837 pr->throttling.state = 0; 838 839 value = 0; 840 ret = acpi_read_throttling_status(pr, &value); 841 if (ret >= 0) { 842 state = acpi_get_throttling_state(pr, value); 843 if (state == -1) { 844 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 845 "Invalid throttling state, reset\n")); 846 state = 0; 847 ret = acpi_processor_set_throttling(pr, state, true); 848 if (ret) 849 return ret; 850 } 851 pr->throttling.state = state; 852 } 853 854 return 0; 855 } 856 857 static int acpi_processor_get_throttling(struct acpi_processor *pr) 858 { 859 cpumask_var_t saved_mask; 860 int ret; 861 862 if (!pr) 863 return -EINVAL; 864 865 if (!pr->flags.throttling) 866 return -ENODEV; 867 868 if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 869 return -ENOMEM; 870 871 /* 872 * Migrate task to the cpu pointed by pr. 873 */ 874 cpumask_copy(saved_mask, ¤t->cpus_allowed); 875 /* FIXME: use work_on_cpu() */ 876 set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 877 ret = pr->throttling.acpi_processor_get_throttling(pr); 878 /* restore the previous state */ 879 set_cpus_allowed_ptr(current, saved_mask); 880 free_cpumask_var(saved_mask); 881 882 return ret; 883 } 884 885 static int acpi_processor_get_fadt_info(struct acpi_processor *pr) 886 { 887 int i, step; 888 889 if (!pr->throttling.address) { 890 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n")); 891 return -EINVAL; 892 } else if (!pr->throttling.duty_width) { 893 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n")); 894 return -EINVAL; 895 } 896 /* TBD: Support duty_cycle values that span bit 4. */ 897 else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) { 898 printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n"); 899 return -EINVAL; 900 } 901 902 pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width; 903 904 /* 905 * Compute state values. Note that throttling displays a linear power 906 * performance relationship (at 50% performance the CPU will consume 907 * 50% power). Values are in 1/10th of a percent to preserve accuracy. 908 */ 909 910 step = (1000 / pr->throttling.state_count); 911 912 for (i = 0; i < pr->throttling.state_count; i++) { 913 pr->throttling.states[i].performance = 1000 - step * i; 914 pr->throttling.states[i].power = 1000 - step * i; 915 } 916 return 0; 917 } 918 919 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, 920 int state, bool force) 921 { 922 u32 value = 0; 923 u32 duty_mask = 0; 924 u32 duty_value = 0; 925 926 if (!pr) 927 return -EINVAL; 928 929 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 930 return -EINVAL; 931 932 if (!pr->flags.throttling) 933 return -ENODEV; 934 935 if (!force && (state == pr->throttling.state)) 936 return 0; 937 938 if (state < pr->throttling_platform_limit) 939 return -EPERM; 940 /* 941 * Calculate the duty_value and duty_mask. 942 */ 943 if (state) { 944 duty_value = pr->throttling.state_count - state; 945 946 duty_value <<= pr->throttling.duty_offset; 947 948 /* Used to clear all duty_value bits */ 949 duty_mask = pr->throttling.state_count - 1; 950 951 duty_mask <<= acpi_gbl_FADT.duty_offset; 952 duty_mask = ~duty_mask; 953 } 954 955 local_irq_disable(); 956 957 /* 958 * Disable throttling by writing a 0 to bit 4. Note that we must 959 * turn it off before you can change the duty_value. 960 */ 961 value = inl(pr->throttling.address); 962 if (value & 0x10) { 963 value &= 0xFFFFFFEF; 964 outl(value, pr->throttling.address); 965 } 966 967 /* 968 * Write the new duty_value and then enable throttling. Note 969 * that a state value of 0 leaves throttling disabled. 970 */ 971 if (state) { 972 value &= duty_mask; 973 value |= duty_value; 974 outl(value, pr->throttling.address); 975 976 value |= 0x00000010; 977 outl(value, pr->throttling.address); 978 } 979 980 pr->throttling.state = state; 981 982 local_irq_enable(); 983 984 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 985 "Throttling state set to T%d (%d%%)\n", state, 986 (pr->throttling.states[state].performance ? pr-> 987 throttling.states[state].performance / 10 : 0))); 988 989 return 0; 990 } 991 992 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, 993 int state, bool force) 994 { 995 int ret; 996 acpi_integer value; 997 998 if (!pr) 999 return -EINVAL; 1000 1001 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1002 return -EINVAL; 1003 1004 if (!pr->flags.throttling) 1005 return -ENODEV; 1006 1007 if (!force && (state == pr->throttling.state)) 1008 return 0; 1009 1010 if (state < pr->throttling_platform_limit) 1011 return -EPERM; 1012 1013 value = 0; 1014 ret = acpi_get_throttling_value(pr, state, &value); 1015 if (ret >= 0) { 1016 acpi_write_throttling_state(pr, value); 1017 pr->throttling.state = state; 1018 } 1019 1020 return 0; 1021 } 1022 1023 int acpi_processor_set_throttling(struct acpi_processor *pr, 1024 int state, bool force) 1025 { 1026 cpumask_var_t saved_mask; 1027 int ret = 0; 1028 unsigned int i; 1029 struct acpi_processor *match_pr; 1030 struct acpi_processor_throttling *p_throttling; 1031 struct throttling_tstate t_state; 1032 cpumask_var_t online_throttling_cpus; 1033 1034 if (!pr) 1035 return -EINVAL; 1036 1037 if (!pr->flags.throttling) 1038 return -ENODEV; 1039 1040 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1041 return -EINVAL; 1042 1043 if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 1044 return -ENOMEM; 1045 1046 if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { 1047 free_cpumask_var(saved_mask); 1048 return -ENOMEM; 1049 } 1050 1051 cpumask_copy(saved_mask, ¤t->cpus_allowed); 1052 t_state.target_state = state; 1053 p_throttling = &(pr->throttling); 1054 cpumask_and(online_throttling_cpus, cpu_online_mask, 1055 p_throttling->shared_cpu_map); 1056 /* 1057 * The throttling notifier will be called for every 1058 * affected cpu in order to get one proper T-state. 1059 * The notifier event is THROTTLING_PRECHANGE. 1060 */ 1061 for_each_cpu(i, online_throttling_cpus) { 1062 t_state.cpu = i; 1063 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1064 &t_state); 1065 } 1066 /* 1067 * The function of acpi_processor_set_throttling will be called 1068 * to switch T-state. If the coordination type is SW_ALL or HW_ALL, 1069 * it is necessary to call it for every affected cpu. Otherwise 1070 * it can be called only for the cpu pointed by pr. 1071 */ 1072 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1073 /* FIXME: use work_on_cpu() */ 1074 set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 1075 ret = p_throttling->acpi_processor_set_throttling(pr, 1076 t_state.target_state, force); 1077 } else { 1078 /* 1079 * When the T-state coordination is SW_ALL or HW_ALL, 1080 * it is necessary to set T-state for every affected 1081 * cpus. 1082 */ 1083 for_each_cpu(i, online_throttling_cpus) { 1084 match_pr = per_cpu(processors, i); 1085 /* 1086 * If the pointer is invalid, we will report the 1087 * error message and continue. 1088 */ 1089 if (!match_pr) { 1090 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1091 "Invalid Pointer for CPU %d\n", i)); 1092 continue; 1093 } 1094 /* 1095 * If the throttling control is unsupported on CPU i, 1096 * we will report the error message and continue. 1097 */ 1098 if (!match_pr->flags.throttling) { 1099 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1100 "Throttling Controll is unsupported " 1101 "on CPU %d\n", i)); 1102 continue; 1103 } 1104 t_state.cpu = i; 1105 /* FIXME: use work_on_cpu() */ 1106 set_cpus_allowed_ptr(current, cpumask_of(i)); 1107 ret = match_pr->throttling. 1108 acpi_processor_set_throttling( 1109 match_pr, t_state.target_state, force); 1110 } 1111 } 1112 /* 1113 * After the set_throttling is called, the 1114 * throttling notifier is called for every 1115 * affected cpu to update the T-states. 1116 * The notifier event is THROTTLING_POSTCHANGE 1117 */ 1118 for_each_cpu(i, online_throttling_cpus) { 1119 t_state.cpu = i; 1120 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1121 &t_state); 1122 } 1123 /* restore the previous state */ 1124 /* FIXME: use work_on_cpu() */ 1125 set_cpus_allowed_ptr(current, saved_mask); 1126 free_cpumask_var(online_throttling_cpus); 1127 free_cpumask_var(saved_mask); 1128 return ret; 1129 } 1130 1131 int acpi_processor_get_throttling_info(struct acpi_processor *pr) 1132 { 1133 int result = 0; 1134 struct acpi_processor_throttling *pthrottling; 1135 1136 if (!pr) 1137 return -EINVAL; 1138 1139 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1140 "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n", 1141 pr->throttling.address, 1142 pr->throttling.duty_offset, 1143 pr->throttling.duty_width)); 1144 1145 /* 1146 * Evaluate _PTC, _TSS and _TPC 1147 * They must all be present or none of them can be used. 1148 */ 1149 if (acpi_processor_get_throttling_control(pr) || 1150 acpi_processor_get_throttling_states(pr) || 1151 acpi_processor_get_platform_limit(pr)) 1152 { 1153 pr->throttling.acpi_processor_get_throttling = 1154 &acpi_processor_get_throttling_fadt; 1155 pr->throttling.acpi_processor_set_throttling = 1156 &acpi_processor_set_throttling_fadt; 1157 if (acpi_processor_get_fadt_info(pr)) 1158 return 0; 1159 } else { 1160 pr->throttling.acpi_processor_get_throttling = 1161 &acpi_processor_get_throttling_ptc; 1162 pr->throttling.acpi_processor_set_throttling = 1163 &acpi_processor_set_throttling_ptc; 1164 } 1165 1166 /* 1167 * If TSD package for one CPU can't be parsed successfully, it means 1168 * that this CPU will have no coordination with other CPUs. 1169 */ 1170 if (acpi_processor_get_tsd(pr)) { 1171 pthrottling = &pr->throttling; 1172 pthrottling->tsd_valid_flag = 0; 1173 cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 1174 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 1175 } 1176 1177 /* 1178 * PIIX4 Errata: We don't support throttling on the original PIIX4. 1179 * This shouldn't be an issue as few (if any) mobile systems ever 1180 * used this part. 1181 */ 1182 if (errata.piix4.throttle) { 1183 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1184 "Throttling not supported on PIIX4 A- or B-step\n")); 1185 return 0; 1186 } 1187 1188 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", 1189 pr->throttling.state_count)); 1190 1191 pr->flags.throttling = 1; 1192 1193 /* 1194 * Disable throttling (if enabled). We'll let subsequent policy (e.g. 1195 * thermal) decide to lower performance if it so chooses, but for now 1196 * we'll crank up the speed. 1197 */ 1198 1199 result = acpi_processor_get_throttling(pr); 1200 if (result) 1201 goto end; 1202 1203 if (pr->throttling.state) { 1204 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1205 "Disabling throttling (was T%d)\n", 1206 pr->throttling.state)); 1207 result = acpi_processor_set_throttling(pr, 0, false); 1208 if (result) 1209 goto end; 1210 } 1211 1212 end: 1213 if (result) 1214 pr->flags.throttling = 0; 1215 1216 return result; 1217 } 1218 1219 /* proc interface */ 1220 #ifdef CONFIG_ACPI_PROCFS 1221 static int acpi_processor_throttling_seq_show(struct seq_file *seq, 1222 void *offset) 1223 { 1224 struct acpi_processor *pr = seq->private; 1225 int i = 0; 1226 int result = 0; 1227 1228 if (!pr) 1229 goto end; 1230 1231 if (!(pr->throttling.state_count > 0)) { 1232 seq_puts(seq, "<not supported>\n"); 1233 goto end; 1234 } 1235 1236 result = acpi_processor_get_throttling(pr); 1237 1238 if (result) { 1239 seq_puts(seq, 1240 "Could not determine current throttling state.\n"); 1241 goto end; 1242 } 1243 1244 seq_printf(seq, "state count: %d\n" 1245 "active state: T%d\n" 1246 "state available: T%d to T%d\n", 1247 pr->throttling.state_count, pr->throttling.state, 1248 pr->throttling_platform_limit, 1249 pr->throttling.state_count - 1); 1250 1251 seq_puts(seq, "states:\n"); 1252 if (pr->throttling.acpi_processor_get_throttling == 1253 acpi_processor_get_throttling_fadt) { 1254 for (i = 0; i < pr->throttling.state_count; i++) 1255 seq_printf(seq, " %cT%d: %02d%%\n", 1256 (i == pr->throttling.state ? '*' : ' '), i, 1257 (pr->throttling.states[i].performance ? pr-> 1258 throttling.states[i].performance / 10 : 0)); 1259 } else { 1260 for (i = 0; i < pr->throttling.state_count; i++) 1261 seq_printf(seq, " %cT%d: %02d%%\n", 1262 (i == pr->throttling.state ? '*' : ' '), i, 1263 (int)pr->throttling.states_tss[i]. 1264 freqpercentage); 1265 } 1266 1267 end: 1268 return 0; 1269 } 1270 1271 static int acpi_processor_throttling_open_fs(struct inode *inode, 1272 struct file *file) 1273 { 1274 return single_open(file, acpi_processor_throttling_seq_show, 1275 PDE(inode)->data); 1276 } 1277 1278 static ssize_t acpi_processor_write_throttling(struct file *file, 1279 const char __user * buffer, 1280 size_t count, loff_t * data) 1281 { 1282 int result = 0; 1283 struct seq_file *m = file->private_data; 1284 struct acpi_processor *pr = m->private; 1285 char state_string[5] = ""; 1286 char *charp = NULL; 1287 size_t state_val = 0; 1288 char tmpbuf[5] = ""; 1289 1290 if (!pr || (count > sizeof(state_string) - 1)) 1291 return -EINVAL; 1292 1293 if (copy_from_user(state_string, buffer, count)) 1294 return -EFAULT; 1295 1296 state_string[count] = '\0'; 1297 if ((count > 0) && (state_string[count-1] == '\n')) 1298 state_string[count-1] = '\0'; 1299 1300 charp = state_string; 1301 if ((state_string[0] == 't') || (state_string[0] == 'T')) 1302 charp++; 1303 1304 state_val = simple_strtoul(charp, NULL, 0); 1305 if (state_val >= pr->throttling.state_count) 1306 return -EINVAL; 1307 1308 snprintf(tmpbuf, 5, "%zu", state_val); 1309 1310 if (strcmp(tmpbuf, charp) != 0) 1311 return -EINVAL; 1312 1313 result = acpi_processor_set_throttling(pr, state_val, false); 1314 if (result) 1315 return result; 1316 1317 return count; 1318 } 1319 1320 const struct file_operations acpi_processor_throttling_fops = { 1321 .owner = THIS_MODULE, 1322 .open = acpi_processor_throttling_open_fs, 1323 .read = seq_read, 1324 .write = acpi_processor_write_throttling, 1325 .llseek = seq_lseek, 1326 .release = single_release, 1327 }; 1328 #endif 1329