/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

/* ignore_tpc:
 *  0 -> the ACPI processor driver honors _TPC values
 *  1 -> the ACPI processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");

struct throttling_tstate {
        unsigned int cpu;       /* cpu nr */
        int target_state;       /* target T-state */
};

#define THROTTLING_PRECHANGE      (1)
#define THROTTLING_POSTCHANGE     (2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr,
                                  int state, bool force);

static int acpi_processor_update_tsd_coord(void)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_var_t covered_cpus;
        struct acpi_processor *pr, *match_pr;
        struct acpi_tsd_package *pdomain, *match_pdomain;
        struct acpi_processor_throttling *pthrottling, *match_pthrottling;

        if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;

        /*
         * Now that we have _TSD data from all CPUs, let's set up T-state
         * coordination between all CPUs.
         */
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pthrottling = &(pr->throttling);

                /*
                 * If the _TSD package for one CPU is invalid, the
                 * coordination among all CPUs is treated as invalid.
                 */
                if (!pthrottling->tsd_valid_flag) {
                        retval = -EINVAL;
                        break;
                }
        }
        if (retval)
                goto err_ret;

        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
                        continue;

                if (cpumask_test_cpu(i, covered_cpus))
                        continue;
                pthrottling = &pr->throttling;

                pdomain = &(pthrottling->domain_info);
                cpumask_set_cpu(i, pthrottling->shared_cpu_map);
                cpumask_set_cpu(i, covered_cpus);
                /*
                 * If the number of processors in the TSD domain is 1, it is
                 * unnecessary to parse the coordination for this CPU.
                 */
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = per_cpu(processors, j);
                        if (!match_pr)
                                continue;

                        match_pthrottling = &(match_pr->throttling);
                        match_pdomain = &(match_pthrottling->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain.
                         * If two TSD packages have the same domain, they
                         * should have the same num_processors and
                         * coordination type. Otherwise it will be regarded
                         * as illegal.
                         */
                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpumask_set_cpu(j, covered_cpus);
                        cpumask_set_cpu(j, pthrottling->shared_cpu_map);
                        count++;
                }
                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = per_cpu(processors, j);
                        if (!match_pr)
                                continue;

                        match_pthrottling = &(match_pr->throttling);
                        match_pdomain = &(match_pthrottling->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /*
                         * If some CPUs have the same domain, they
                         * will have the same shared_cpu_map.
                         */
                        cpumask_copy(match_pthrottling->shared_cpu_map,
                                     pthrottling->shared_cpu_map);
                }
        }

err_ret:
        free_cpumask_var(covered_cpus);

        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
                        continue;

                /*
                 * Assume no coordination on any error parsing domain info.
                 * The coordination type will be forced as SW_ALL.
                 */
                if (retval) {
                        pthrottling = &(pr->throttling);
                        cpumask_clear(pthrottling->shared_cpu_map);
                        cpumask_set_cpu(i, pthrottling->shared_cpu_map);
                        pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
                }
        }

        return retval;
}
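
/*
 * Illustrative example (hypothetical values, not from any particular
 * platform): on a two-core package whose firmware places both cores in
 * T-state domain 0 with SW_ANY coordination, _TSD for each core returns
 * a single package of ACPI_TSD_REV0_ENTRIES (5) integers:
 *
 *      { 5, 0, 0, 0xFD, 2 }
 *        NumEntries, Revision, Domain, CoordType (0xFD == SW_ANY),
 *        NumProcessors
 *
 * acpi_processor_update_tsd_coord() above then leaves each of the two
 * CPUs set in the other's shared_cpu_map.
 */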
/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
        if (acpi_processor_update_tsd_coord()) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Assume no T-state coordination\n"));
        }

        return;
}

static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
        struct throttling_tstate *p_tstate = data;
        struct acpi_processor *pr;
        unsigned int cpu;
        int target_state;
        struct acpi_processor_limit *p_limit;
        struct acpi_processor_throttling *p_throttling;

        cpu = p_tstate->cpu;
        pr = per_cpu(processors, cpu);
        if (!pr) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
                return 0;
        }
        if (!pr->flags.throttling) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
                                "unsupported on CPU %d\n", cpu));
                return 0;
        }
        target_state = p_tstate->target_state;
        p_throttling = &(pr->throttling);
        switch (event) {
        case THROTTLING_PRECHANGE:
                /*
                 * The prechange event is used to choose one proper T-state,
                 * which meets the limits of thermal, user and _TPC.
                 */
                p_limit = &pr->limit;
                if (p_limit->thermal.tx > target_state)
                        target_state = p_limit->thermal.tx;
                if (p_limit->user.tx > target_state)
                        target_state = p_limit->user.tx;
                if (pr->throttling_platform_limit > target_state)
                        target_state = pr->throttling_platform_limit;
                if (target_state >= p_throttling->state_count) {
                        printk(KERN_WARNING
                                "Exceeded the T-state limit\n");
                        target_state = p_throttling->state_count - 1;
                }
                p_tstate->target_state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
                                "target T-state of CPU %d is T%d\n",
                                cpu, target_state));
                break;
        case THROTTLING_POSTCHANGE:
                /*
                 * The postchange event is only used to update the
                 * T-state flag of acpi_processor_throttling.
                 */
                p_throttling->state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
                                "CPU %d is switched to T%d\n",
                                cpu, target_state));
                break;
        default:
                printk(KERN_WARNING
                        "Unsupported Throttling notifier event\n");
                break;
        }

        return 0;
}
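
/*
 * Worked example of the PRECHANGE clamping above (hypothetical values):
 * if a caller requests T1 while the thermal limit is T3, the user limit
 * is T1 and _TPC reports 2, target_state is raised step by step to
 * max(1, 3, 1, 2) = 3, so the cpu ends up at T3.  The notifier can only
 * deepen throttling, never relax it below what the limits allow.
 */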
/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long long tpc = 0;

        if (!pr)
                return -EINVAL;

        if (ignore_tpc)
                goto end;

        status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
                }
                return -ENODEV;
        }

end:
        pr->throttling_platform_limit = (int)tpc;
        return 0;
}

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
        int result = 0;
        int throttling_limit;
        int current_state;
        struct acpi_processor_limit *limit;
        int target_state;

        if (ignore_tpc)
                return 0;

        result = acpi_processor_get_platform_limit(pr);
        if (result) {
                /* Throttling Limit is unsupported */
                return result;
        }

        throttling_limit = pr->throttling_platform_limit;
        if (throttling_limit >= pr->throttling.state_count) {
                /* Incorrect Throttling Limit */
                return -EINVAL;
        }

        current_state = pr->throttling.state;
        if (current_state > throttling_limit) {
                /*
                 * The current state already meets the requirement of
                 * the _TPC limit.  But it is reasonable that OSPM changes
                 * T-states from high to low for better performance.
                 * Of course the limit conditions of thermal
                 * and user should be considered.
                 */
                limit = &pr->limit;
                target_state = throttling_limit;
                if (limit->thermal.tx > target_state)
                        target_state = limit->thermal.tx;
                if (limit->user.tx > target_state)
                        target_state = limit->user.tx;
        } else if (current_state == throttling_limit) {
                /*
                 * Unnecessary to change the throttling state
                 */
                return 0;
        } else {
                /*
                 * If the current state is lower than the limit of _TPC, it
                 * will be forced to switch to the throttling state defined
                 * by throttling_platform_limit.
                 * Because the previous state already meets the limit
                 * conditions of thermal and user, it is unnecessary to
                 * check them again.
                 */
                target_state = throttling_limit;
        }
        return acpi_processor_set_throttling(pr, target_state, false);
}
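
/*
 * Example (hypothetical values): if the platform raises _TPC from 0 to 2
 * while a cpu runs at T0, current_state (0) is below throttling_limit (2),
 * so the else branch above selects target_state = 2 and the cpu is forced
 * down to T2.  Conversely, a cpu already at T3 with _TPC dropping to 1 may
 * be relaxed toward T1, but only as far as the thermal and user limits
 * permit.
 */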
/*
 * This function is used to reevaluate whether the T-state is valid
 * after one CPU is onlined/offlined.
 * Note that it won't reevaluate the following properties of
 * the T-state:
 *      1. Control method.
 *      2. The number of supported T-states.
 *      3. The TSD domain.
 */
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
                                        unsigned long action)
{
        int result = 0;

        if (action == CPU_DEAD) {
                /* When one CPU goes offline, the T-state throttling
                 * will be invalidated.
                 */
                pr->flags.throttling = 0;
                return;
        }
        /* The following rechecks whether the T-state is valid for
         * the online CPU.
         */
        if (!pr->throttling.state_count) {
                /* If the number of T-states is invalid, throttling is
                 * invalidated.
                 */
                pr->flags.throttling = 0;
                return;
        }
        pr->flags.throttling = 1;

        /* Disable throttling (if enabled).  We'll let subsequent
         * policy (e.g. thermal) decide to lower performance if it
         * so chooses, but for now we'll crank up the speed.
         */

        result = acpi_processor_get_throttling(pr);
        if (result)
                goto end;

        if (pr->throttling.state) {
                result = acpi_processor_set_throttling(pr, 0, false);
                if (result)
                        goto end;
        }

end:
        if (result)
                pr->flags.throttling = 0;
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *ptc = NULL;
        union acpi_object obj = { 0 };
        struct acpi_processor_throttling *throttling;

        status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
                }
                return -ENODEV;
        }

        ptc = (union acpi_object *)buffer.pointer;
        if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
            || (ptc->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = ptc->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX
                       "Invalid _PTC data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->throttling.control_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        /*
         * status_register
         */

        obj = ptc->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->throttling.status_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        throttling = &pr->throttling;

        if ((throttling->control_register.bit_width +
             throttling->control_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
                result = -EFAULT;
                goto end;
        }

        if ((throttling->status_register.bit_width +
             throttling->status_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
                result = -EFAULT;
                goto end;
        }

end:
        kfree(buffer.pointer);

        return result;
}

/*
 * _TSS - Throttling Supported States
 */
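/*
 * Per the ACPI specification, each _TSS entry is a package of five
 * integers, matching the "NNNNN" format string used below:
 *   FreqPercentage - performance as a percent of the T0 frequency
 *   Power          - typical power dissipation (milliwatts)
 *   TransLatency   - worst-case transition latency (microseconds)
 *   Control        - value written to the control register for this state
 *   Status         - value read back from the status register for this state
 */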
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tss = NULL;
        int i;

        status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
                }
                return -ENODEV;
        }

        tss = buffer.pointer;
        if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _TSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          tss->package.count));

        pr->throttling.state_count = tss->package.count;
        pr->throttling.states_tss =
            kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
                    GFP_KERNEL);
        if (!pr->throttling.states_tss) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->throttling.state_count; i++) {

                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);

                state.length = sizeof(struct acpi_processor_tx_tss);
                state.pointer = tx;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(tss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }

                if (!tx->freqpercentage) {
                        printk(KERN_ERR PREFIX
                               "Invalid _TSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }
        }

end:
        kfree(buffer.pointer);

        return result;
}

/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tsd = NULL;
        struct acpi_tsd_package *pdomain;
        struct acpi_processor_throttling *pthrottling;

        pthrottling = &pr->throttling;
        pthrottling->tsd_valid_flag = 0;

        status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
                }
                return -ENODEV;
        }

        tsd = buffer.pointer;
        if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _TSD data\n");
                result = -EFAULT;
                goto end;
        }

        if (tsd->package.count != 1) {
                printk(KERN_ERR PREFIX "Invalid _TSD data\n");
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->throttling.domain_info);

        state.length = sizeof(struct acpi_tsd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(tsd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                printk(KERN_ERR PREFIX "Invalid _TSD data\n");
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
                printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
                printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
                result = -EFAULT;
                goto end;
        }

        pthrottling = &pr->throttling;
        pthrottling->tsd_valid_flag = 1;
        pthrottling->shared_type = pdomain->coord_type;
        cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
        /*
         * If the coordination type is not defined in the ACPI spec,
         * tsd_valid_flag will be cleared and the coordination type
         * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
         */
        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
                pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
                pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                pthrottling->tsd_valid_flag = 0;
                pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }

end:
        kfree(buffer.pointer);
        return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
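/*
 * Worked example of the FADT duty-cycle encoding handled below
 * (hypothetical values): with duty_width = 3 and duty_offset = 1,
 * state_count is 1 << 3 = 8 and duty_mask covers bits 3:1 of the P_CNT
 * register.  Reading duty_value = 6 with the enable bit (bit 4) set
 * means the clock runs 6/8 of the time, i.e. state = 8 - 6 = T2, which
 * matches the 75.0% performance value computed in
 * acpi_processor_get_fadt_info().
 */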
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
        int state = 0;
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= pr->throttling.duty_offset;

        local_irq_disable();

        value = inl(pr->throttling.address);

        /*
         * Compute the current throttling state when throttling is enabled
         * (bit 4 is on).
         */
        if (value & 0x10) {
                duty_value = value & duty_mask;
                duty_value >>= pr->throttling.duty_offset;

                if (duty_value)
                        state = pr->throttling.state_count - duty_value;
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state is T%d (%d%% throttling applied)\n",
                          state, pr->throttling.states[state].performance));

        return 0;
}
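
/*
 * For the FIXED_HARDWARE address space, the T-state is driven through
 * MSR_IA32_THERM_CONTROL (the IA32_CLOCK_MODULATION MSR).  Per the
 * Intel SDM, bit 4 enables on-demand clock modulation and bits 3:1
 * select the duty cycle; the _TSS control/status values supplied by the
 * firmware are expected to already be in this layout, so the helpers
 * below simply read and write the raw 64-bit MSR value.
 */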
#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(u64 *value)
{
        u64 msr_high, msr_low;
        u64 msr = 0;
        int ret = -1;

        if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
                !this_cpu_has(X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                        "HARDWARE addr space, NOT supported yet\n");
        } else {
                /* Low/high halves are read into the low 32 bits of the
                 * u64 locals via the u32 casts (little-endian x86). */
                msr_low = 0;
                msr_high = 0;
                rdmsr_safe(MSR_IA32_THERM_CONTROL,
                        (u32 *)&msr_low, (u32 *)&msr_high);
                msr = (msr_high << 32) | msr_low;
                *value = (u64)msr;
                ret = 0;
        }
        return ret;
}

static int acpi_throttling_wrmsr(u64 value)
{
        int ret = -1;
        u64 msr;

        if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
                !this_cpu_has(X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                        "HARDWARE addr space, NOT supported yet\n");
        } else {
                msr = value;
                wrmsr_safe(MSR_IA32_THERM_CONTROL,
                        msr & 0xffffffff, msr >> 32);
                ret = 0;
        }
        return ret;
}
#else
static int acpi_throttling_rdmsr(u64 *value)
{
        printk(KERN_ERR PREFIX
                "HARDWARE addr space, NOT supported yet\n");
        return -1;
}

static int acpi_throttling_wrmsr(u64 value)
{
        printk(KERN_ERR PREFIX
                "HARDWARE addr space, NOT supported yet\n");
        return -1;
}
#endif

static int acpi_read_throttling_status(struct acpi_processor *pr,
                                        u64 *value)
{
        u32 bit_width, bit_offset;
        u32 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->status_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                bit_width = throttling->status_register.bit_width;
                bit_offset = throttling->status_register.bit_offset;

                acpi_os_read_port((acpi_io_address) throttling->
                                  status_register.address, &ptc_value,
                                  (u32) (bit_width + bit_offset));
                ptc_mask = (1 << bit_width) - 1;
                *value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_rdmsr(value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->status_register.space_id));
        }
        return ret;
}

static int acpi_write_throttling_state(struct acpi_processor *pr,
                                u64 value)
{
        u32 bit_width, bit_offset;
        u64 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                bit_width = throttling->control_register.bit_width;
                bit_offset = throttling->control_register.bit_offset;
                ptc_mask = (1 << bit_width) - 1;
                ptc_value = value & ptc_mask;

                acpi_os_write_port((acpi_io_address) throttling->
                                        control_register.address,
                                        (u32) (ptc_value << bit_offset),
                                        (u32) (bit_width + bit_offset));
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_wrmsr(value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->control_register.space_id));
        }
        return ret;
}

static int acpi_get_throttling_state(struct acpi_processor *pr,
                                u64 value)
{
        int i;

        for (i = 0; i < pr->throttling.state_count; i++) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);
                if (tx->control == value)
                        return i;
        }
        return -1;
}

static int acpi_get_throttling_value(struct acpi_processor *pr,
                        int state, u64 *value)
{
        int ret = -1;

        /* states_tss has state_count entries, so the bound is strict */
        if (state >= 0 && state < pr->throttling.state_count) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[state]);
                *value = tx->control;
                ret = 0;
        }
        return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
        int state = 0;
        int ret;
        u64 value;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        value = 0;
        ret = acpi_read_throttling_status(pr, &value);
        if (ret >= 0) {
                state = acpi_get_throttling_state(pr, value);
                if (state == -1) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "Invalid throttling state, reset\n"));
                        state = 0;
                        ret = acpi_processor_set_throttling(pr, state, true);
                        if (ret)
                                return ret;
                }
                pr->throttling.state = state;
        }

        return 0;
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
        cpumask_var_t saved_mask;
        int ret;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
                return -ENOMEM;

        /*
         * Migrate the task to the cpu pointed to by pr->id, because the
         * throttling status/control register (or MSR) is per-CPU and must
         * be accessed from that CPU.
         */
        cpumask_copy(saved_mask, &current->cpus_allowed);
        /* FIXME: use work_on_cpu() */
        if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
                /* Can't migrate to the target pr->id CPU. Exit */
                free_cpumask_var(saved_mask);
                return -ENODEV;
        }
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed_ptr(current, saved_mask);
        free_cpumask_var(saved_mask);

        return ret;
}

static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
        int i, step;

        if (!pr->throttling.address) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
                return -EINVAL;
        } else if (!pr->throttling.duty_width) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
                return -EINVAL;
        }
        /* TBD: Support duty_cycle values that span bit 4. */
        else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
                printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
                return -EINVAL;
        }

        pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

        /*
         * Compute state values. Note that throttling displays a linear power
         * performance relationship (at 50% performance the CPU will consume
         * 50% power). Values are in 1/10th of a percent to preserve accuracy.
         */

        step = (1000 / pr->throttling.state_count);

        for (i = 0; i < pr->throttling.state_count; i++) {
                pr->throttling.states[i].performance = 1000 - step * i;
                pr->throttling.states[i].power = 1000 - step * i;
        }
        return 0;
}
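
/*
 * Example of the table computed above (hypothetical duty_width = 2):
 * state_count = 4 and step = 250, giving
 *
 *      T0: performance/power = 1000 (100.0%)
 *      T1: performance/power =  750 ( 75.0%)
 *      T2: performance/power =  500 ( 50.0%)
 *      T3: performance/power =  250 ( 25.0%)
 *
 * in tenths of a percent, reflecting the linear power/performance
 * relationship noted in the comment above.
 */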
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
                                              int state, bool force)
{
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (!force && (state == pr->throttling.state))
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;
        /*
         * Calculate the duty_value and duty_mask.
         */
        if (state) {
                duty_value = pr->throttling.state_count - state;

                duty_value <<= pr->throttling.duty_offset;

                /* Used to clear all duty_value bits */
                duty_mask = pr->throttling.state_count - 1;

                duty_mask <<= acpi_gbl_FADT.duty_offset;
                duty_mask = ~duty_mask;
        }

        local_irq_disable();

        /*
         * Disable throttling by writing a 0 to bit 4.  Note that we must
         * turn it off before we can change the duty_value.
         */
        value = inl(pr->throttling.address);
        if (value & 0x10) {
                value &= 0xFFFFFFEF;
                outl(value, pr->throttling.address);
        }

        /*
         * Write the new duty_value and then enable throttling.  Note
         * that a state value of 0 leaves throttling disabled.
         */
        if (state) {
                value &= duty_mask;
                value |= duty_value;
                outl(value, pr->throttling.address);

                value |= 0x00000010;
                outl(value, pr->throttling.address);
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state set to T%d (%d%%)\n", state,
                          (pr->throttling.states[state].performance ? pr->
                           throttling.states[state].performance / 10 : 0)));

        return 0;
}
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
                                             int state, bool force)
{
        int ret;
        u64 value;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (!force && (state == pr->throttling.state))
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;

        value = 0;
        ret = acpi_get_throttling_value(pr, state, &value);
        if (ret >= 0) {
                acpi_write_throttling_state(pr, value);
                pr->throttling.state = state;
        }

        return 0;
}
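
/*
 * acpi_processor_set_throttling() below ties the pieces together: it
 * runs the PRECHANGE notifier on every online cpu sharing the T-state
 * domain (so the requested state is clamped against the thermal, user
 * and _TPC limits), performs the actual register/MSR write on one cpu
 * (SW_ANY) or on every affected cpu (SW_ALL/HW_ALL), and finally runs
 * the POSTCHANGE notifier so each cpu's cached throttling.state is
 * updated.
 */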
int acpi_processor_set_throttling(struct acpi_processor *pr,
                                  int state, bool force)
{
        cpumask_var_t saved_mask;
        int ret = 0;
        unsigned int i;
        struct acpi_processor *match_pr;
        struct acpi_processor_throttling *p_throttling;
        struct throttling_tstate t_state;
        cpumask_var_t online_throttling_cpus;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
                return -ENOMEM;

        if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
                free_cpumask_var(saved_mask);
                return -ENOMEM;
        }

        if (cpu_is_offline(pr->id)) {
                /*
                 * The cpu pointed to by pr->id is offline.  There is no
                 * need to change the throttling state any more.  Take the
                 * exit path so the cpumasks allocated above are freed.
                 */
                ret = -ENODEV;
                goto exit;
        }

        cpumask_copy(saved_mask, &current->cpus_allowed);
        t_state.target_state = state;
        p_throttling = &(pr->throttling);
        cpumask_and(online_throttling_cpus, cpu_online_mask,
                    p_throttling->shared_cpu_map);
        /*
         * The throttling notifier will be called for every
         * affected cpu in order to get one proper T-state.
         * The notifier event is THROTTLING_PRECHANGE.
         */
        for_each_cpu(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
                                                        &t_state);
        }
        /*
         * The function of acpi_processor_set_throttling will be called
         * to switch the T-state. If the coordination type is SW_ALL or
         * HW_ALL, it is necessary to call it for every affected cpu.
         * Otherwise it can be called only for the cpu pointed to by pr.
         */
        if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
                /* FIXME: use work_on_cpu() */
                if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
                        /* Can't migrate to the pr->id CPU. Exit */
                        ret = -ENODEV;
                        goto exit;
                }
                ret = p_throttling->acpi_processor_set_throttling(pr,
                                                t_state.target_state, force);
        } else {
                /*
                 * When the T-state coordination is SW_ALL or HW_ALL,
                 * it is necessary to set the T-state for every affected
                 * cpu.
                 */
                for_each_cpu(i, online_throttling_cpus) {
                        match_pr = per_cpu(processors, i);
                        /*
                         * If the pointer is invalid, we will report the
                         * error message and continue.
                         */
                        if (!match_pr) {
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "Invalid Pointer for CPU %d\n", i));
                                continue;
                        }
                        /*
                         * If throttling control is unsupported on CPU i,
                         * we will report the error message and continue.
                         */
                        if (!match_pr->flags.throttling) {
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "Throttling Control is unsupported "
                                        "on CPU %d\n", i));
                                continue;
                        }
                        t_state.cpu = i;
                        /* FIXME: use work_on_cpu() */
                        if (set_cpus_allowed_ptr(current, cpumask_of(i)))
                                continue;
                        ret = match_pr->throttling.
                                acpi_processor_set_throttling(
                                match_pr, t_state.target_state, force);
                }
        }
        /*
         * After set_throttling has been called, the throttling notifier
         * is called for every affected cpu to update the T-states.
         * The notifier event is THROTTLING_POSTCHANGE.
         */
        for_each_cpu(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
                                                        &t_state);
        }
        /* restore the previous state */
        /* FIXME: use work_on_cpu() */
        set_cpus_allowed_ptr(current, saved_mask);
exit:
        free_cpumask_var(online_throttling_cpus);
        free_cpumask_var(saved_mask);
        return ret;
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
        int result = 0;
        struct acpi_processor_throttling *pthrottling;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
                          pr->throttling.address,
                          pr->throttling.duty_offset,
                          pr->throttling.duty_width));

        /*
         * Evaluate _PTC, _TSS and _TPC.
         * They must all be present or none of them can be used.
         */
        if (acpi_processor_get_throttling_control(pr) ||
                acpi_processor_get_throttling_states(pr) ||
                acpi_processor_get_platform_limit(pr)) {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_fadt;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_fadt;
                if (acpi_processor_get_fadt_info(pr))
                        return 0;
        } else {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_ptc;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_ptc;
        }

        /*
         * If the _TSD package for one CPU can't be parsed successfully, it
         * means that this CPU will have no coordination with other CPUs.
         */
        if (acpi_processor_get_tsd(pr)) {
                pthrottling = &pr->throttling;
                pthrottling->tsd_valid_flag = 0;
                cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
                pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }

        /*
         * PIIX4 Errata: We don't support throttling on the original PIIX4.
         * This shouldn't be an issue as few (if any) mobile systems ever
         * used this part.
         */
        if (errata.piix4.throttle) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Throttling not supported on PIIX4 A- or B-step\n"));
                return 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          pr->throttling.state_count));

        pr->flags.throttling = 1;

        /*
         * Disable throttling (if enabled).  We'll let subsequent policy
         * (e.g. thermal) decide to lower performance if it so chooses,
         * but for now we'll crank up the speed.
         */

        result = acpi_processor_get_throttling(pr);
        if (result)
                goto end;

        if (pr->throttling.state) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Disabling throttling (was T%d)\n",
                                  pr->throttling.state));
                result = acpi_processor_set_throttling(pr, 0, false);
                if (result)
                        goto end;
        }

end:
        if (result)
                pr->flags.throttling = 0;

        return result;
}