/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

/* ignore_tpc:
 *  0 -> acpi processor driver doesn't ignore _TPC values
 *  1 -> acpi processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");

struct throttling_tstate {
	unsigned int cpu;	/* cpu nr */
	int target_state;	/* target T-state */
};

struct acpi_processor_throttling_arg {
	struct acpi_processor *pr;
	int target_state;
	bool force;
};

#define THROTTLING_PRECHANGE      (1)
#define THROTTLING_POSTCHANGE     (2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr,
				  int state, bool force);

static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, let's set up the
	 * T-state coordination between all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the _TSD package for one CPU is invalid, the
		 * coordination among all CPUs is considered invalid.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;
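
	/*
	 * Worked example for the pass below (illustrative values only):
	 * if CPUs 0 and 1 both report _TSD domain 5 with num_processors
	 * = 2 and coord_type = SW_ALL, both CPU bits end up set in each
	 * CPU's shared_cpu_map.  If the two packages disagree on
	 * num_processors or coord_type, the whole table is rejected and
	 * every CPU falls back to SW_ALL coordination on itself alone.
	 */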
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type.  Otherwise it will be regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
			count++;
		}
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * If some CPUs have the same domain, they
			 * will have the same shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced to SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}

/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord()) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Assume no T-state coordination\n"));
	}

	return;
}

static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
		return 0;
	}
	if (!pr->flags.throttling) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
				  "unsupported on CPU %d\n", cpu));
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * The prechange event is used to choose one proper T-state,
		 * which meets the limits of thermal, user and _TPC.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
			       "Requested state exceeds the T-state limit\n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
				  "target T-state of CPU %d is T%d\n",
				  cpu, target_state));
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * The postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
				  "CPU %d is switched to T%d\n",
				  cpu, target_state));
		break;
	default:
		printk(KERN_WARNING
		       "Unsupported throttling notifier event\n");
		break;
	}

	return 0;
}
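
/*
 * Worked example of the PRECHANGE clamping above (illustrative values
 * only): with a requested target of T1, a thermal limit of T3, a user
 * limit of T2 and _TPC = 2, the deepest (highest-numbered) constraint
 * wins and the notifier settles on T3.  A result beyond the last valid
 * index is clamped to state_count - 1.
 */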

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long tpc = 0;

	if (!pr)
		return -EINVAL;

	if (ignore_tpc)
		goto end;

	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
		}
		return -ENODEV;
	}

end:
	pr->throttling_platform_limit = (int)tpc;
	return 0;
}

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	if (ignore_tpc)
		return 0;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect throttling limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state already meets the requirement of the
		 * _TPC limit, but it is reasonable that OSPM changes
		 * t-states from high to low for better performance.
		 * Of course the limit conditions of thermal
		 * and user should be considered.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is lower than the limit of _TPC, it
		 * will be forced to switch to the throttling state defined
		 * by throttling_platform_limit.
		 * Because the previous state already meets the limit
		 * conditions of thermal and user, it is unnecessary to
		 * check them again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state, false);
}
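
/*
 * Note on _TPC semantics (per the ACPI spec; example values are
 * illustrative): _TPC returns the index of the highest-performance
 * T-state the platform currently permits.  A _TPC of 2 on an
 * eight-state processor means T0 and T1 are off limits, so the
 * shallowest state OSPM may use is T2.
 */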

/*
 * This function is used to reevaluate whether the T-state is valid
 * after one CPU is onlined/offlined.
 * Note that it won't reevaluate the following properties of
 * the T-state:
 *	1. Control method.
 *	2. The number of supported T-states.
 *	3. The TSD domain.
 */
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
				      unsigned long action)
{
	int result = 0;

	if (action == CPU_DEAD) {
		/* When one CPU is offline, the T-state throttling
		 * will be invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	/* the following is to recheck whether the T-state is valid for
	 * the online CPU
	 */
	if (!pr->throttling.state_count) {
		/* If the number of T-states is invalid, throttling is
		 * invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	pr->flags.throttling = 1;

	/* Disable throttling (if enabled).  We'll let subsequent
	 * policy (e.g. thermal) decide to lower performance if it
	 * so chooses, but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */

	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);

	return result;
}
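
/*
 * A typical _PTC object returns two ResourceTemplate buffers, e.g.
 * (illustrative ASL, not taken from any particular firmware):
 *
 *	Name (_PTC, Package (0x02)
 *	{
 *		ResourceTemplate () { Register (FFixedHW, 0x00, 0x00, 0) },
 *		ResourceTemplate () { Register (FFixedHW, 0x00, 0x00, 0) }
 *	})
 *
 * An FFixedHW register selects the MSR path further below, while a
 * SystemIO register selects the port I/O path.
 */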

/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
		}
		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  tss->package.count));

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
		    GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {

		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}

		if (!tx->freqpercentage) {
			printk(KERN_ERR PREFIX
			       "Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}
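
/*
 * Each _TSS entry decoded above carries five integers ("NNNNN"):
 * {FreqPercentage, Power, TransitionLatency, Control, Status}.  For
 * example (illustrative values only), T4 of an eight-state table might
 * be {50, 500, 100, 0x09, 0x00}: 50% of the nominal frequency, 500 mW,
 * 100 us transition latency, with 0x09 as the value written to the
 * control register to enter the state.
 */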

/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (tsd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}

#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(u64 *value)
{
	u64 msr_high, msr_low;
	u64 msr = 0;
	int ret = -1;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space, NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			   (u32 *)&msr_low, (u32 *)&msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (u64)msr;
		ret = 0;
	}
	return ret;
}

static int acpi_throttling_wrmsr(u64 value)
{
	int ret = -1;
	u64 msr;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space, NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			   msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
static int acpi_throttling_rdmsr(u64 *value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space, NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(u64 value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space, NOT supported yet\n");
	return -1;
}
#endif
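
/*
 * Background for the MSR helpers above (per the Intel SDM, not derived
 * from this file): on typical Intel parts MSR_IA32_THERM_CONTROL
 * implements on-demand clock modulation, where bit 4 enables the
 * modulation and bits 3:1 select the duty cycle in 12.5% steps.  The
 * _TSS control/status values read and written here are raw MSR images.
 */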

static int acpi_read_throttling_status(struct acpi_processor *pr,
				       u64 *value)
{
	u32 bit_width, bit_offset;
	u32 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

		acpi_os_read_port((acpi_io_address) throttling->
				  status_register.address, &ptc_value,
				  (u32) (bit_width + bit_offset));
		ptc_mask = (1 << bit_width) - 1;
		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
	}
	return ret;
}

static int acpi_write_throttling_state(struct acpi_processor *pr,
				       u64 value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = (1 << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address) throttling->
				   control_register.address,
				   (u32) (ptc_value << bit_offset),
				   (u32) (bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
	}
	return ret;
}
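
/*
 * Worked example of the SystemIO bit packing above (illustrative
 * numbers): with bit_offset = 1 and bit_width = 3, the mask is 0x7;
 * a raw port value of 0x0C yields (0x0C >> 1) & 0x7 = 0x6 on read,
 * and writing state value 0x6 stores 0x6 << 1 = 0x0C.
 */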

static int acpi_get_throttling_state(struct acpi_processor *pr,
				     u64 value)
{
	int i;

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);
		if (tx->control == value)
			return i;
	}
	return -1;
}

static int acpi_get_throttling_value(struct acpi_processor *pr,
				     int state, u64 *value)
{
	int ret = -1;

	/* Valid states are 0 .. state_count - 1. */
	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		if (state == -1) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Invalid throttling state, reset\n"));
			state = 0;
			ret = acpi_processor_set_throttling(pr, state, true);
			if (ret)
				return ret;
		}
		pr->throttling.state = state;
	}

	return 0;
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	cpumask_var_t saved_mask;
	int ret;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Migrate the task to the cpu pointed to by pr.
	 */
	cpumask_copy(saved_mask, &current->cpus_allowed);
	/* FIXME: use work_on_cpu() */
	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
		/* Can't migrate to the target pr->id CPU.  Exit. */
		free_cpumask_var(saved_mask);
		return -ENODEV;
	}
	ret = pr->throttling.acpi_processor_get_throttling(pr);
	/* restore the previous state */
	set_cpus_allowed_ptr(current, saved_mask);
	free_cpumask_var(saved_mask);

	return ret;
}

static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
	int i, step;

	if (!pr->throttling.address) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
		return -EINVAL;
	} else if (!pr->throttling.duty_width) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
		return -EINVAL;
	}
	/* TBD: Support duty_cycle values that span bit 4. */
	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
		return -EINVAL;
	}

	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

	/*
	 * Compute state values.  Note that throttling displays a linear
	 * power/performance relationship (at 50% performance the CPU will
	 * consume 50% power).  Values are in 1/10th of a percent to
	 * preserve accuracy.
	 */

	step = (1000 / pr->throttling.state_count);

	for (i = 0; i < pr->throttling.state_count; i++) {
		pr->throttling.states[i].performance = 1000 - step * i;
		pr->throttling.states[i].power = 1000 - step * i;
	}
	return 0;
}
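
/*
 * Worked example for the computation above (illustrative values): a
 * FADT duty_width of 3 gives state_count = 8 and step = 125, so the
 * table reads T0 = 1000 (100%), T1 = 875 (87.5%), ... T7 = 125
 * (12.5%), in tenths of a percent of both performance and power.
 */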

static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state, bool force)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that it must
	 * be turned off before the duty_value can be changed.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ?
			   pr->throttling.states[state].performance / 10 : 0)));

	return 0;
}

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state, bool force)
{
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}

static long acpi_processor_throttling_fn(void *data)
{
	struct acpi_processor_throttling_arg *arg = data;
	struct acpi_processor *pr = arg->pr;

	return pr->throttling.acpi_processor_set_throttling(pr,
			arg->target_state, arg->force);
}
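
/*
 * Example of the coordination handling below (illustrative topology):
 * with shared_cpu_map = {2, 3} and SW_ALL coordination, a request to
 * enter T2 schedules acpi_processor_throttling_fn() on both CPU 2 and
 * CPU 3.  With SW_ANY, a transition initiated on any one CPU in the
 * domain takes effect for all of them, so a single work_on_cpu() call
 * on pr->id suffices.
 */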

int acpi_processor_set_throttling(struct acpi_processor *pr,
				  int state, bool force)
{
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct acpi_processor_throttling_arg arg;
	struct throttling_tstate t_state;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (cpu_is_offline(pr->id)) {
		/*
		 * The cpu pointed to by pr->id is offline.  There is no
		 * need to change the throttling state any more.
		 */
		return -ENODEV;
	}

	t_state.target_state = state;
	p_throttling = &(pr->throttling);

	/*
	 * The throttling notifier will be called for every
	 * affected cpu in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
						   &t_state);
	}
	/*
	 * The function of acpi_processor_set_throttling will be called
	 * to switch the T-state.  If the coordination type is SW_ALL or
	 * HW_ALL, it is necessary to call it for every affected cpu.
	 * Otherwise it can be called only for the cpu pointed to by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		arg.pr = pr;
		arg.target_state = state;
		arg.force = force;
		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set the T-state for every affected
		 * cpu.
		 */
		for_each_cpu_and(i, cpu_online_mask,
				 p_throttling->shared_cpu_map) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, we will report the
			 * error message and continue.
			 */
			if (!match_pr) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
						  "Invalid Pointer for CPU %d\n", i));
				continue;
			}
			/*
			 * If throttling control is unsupported on CPU i,
			 * we will report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
						  "Throttling Control is unsupported "
						  "on CPU %d\n", i));
				continue;
			}

			arg.pr = match_pr;
			arg.target_state = state;
			arg.force = force;
			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
					  &arg);
		}
	}
	/*
	 * After set_throttling is called, the throttling notifier is
	 * called for every affected cpu to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
						   &t_state);
	}

	return ret;
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width));

	/*
	 * Evaluate _PTC, _TSS and _TPC.
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
	    acpi_processor_get_throttling_states(pr) ||
	    acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If the _TSD package for one CPU can't be parsed successfully,
	 * it means that this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Throttling not supported on PIIX4 A- or B-step\n"));
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  pr->throttling.state_count));

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled).  We'll let subsequent policy
	 * (e.g. thermal) decide to lower performance if it so chooses,
	 * but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state));
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}