/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define PM_TIMER_TICK_NS		(1000000000ULL / PM_TIMER_FREQUENCY)
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000) / (PM_TIMER_FREQUENCY / 1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static s64 us_to_pm_timer_ticks(s64 t)
{
	return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
}
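
/*
 * Worked example (added for illustration; assumes the standard ACPI PM
 * timer rate, PM_TIMER_FREQUENCY == 3579545 Hz, i.e. 3.579545 MHz):
 *
 *	us_to_pm_timer_ticks(1000) = 1000 * 3579545 / 1000000 ~= 3579 ticks
 *	PM_TIMER_TICKS_TO_US(3579) = 3579 * 1000 / 3579       ~= 1000 us
 *
 * so the two conversions round-trip to within one tick of each other.
 */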
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};


/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}
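
/*
 * Illustration (derived from the two helpers above, not additional
 * logic; assumes a CPU without X86_FEATURE_ARAT): with
 * local_apic_timer_c2_ok unset and valid states C1/C2/C3 at indices
 * 1/2/3, acpi_timer_check_state() records index 2 in
 * timer_broadcast_on_state. Entering C2 or C3 then triggers a
 * CLOCK_EVT_NOTIFY_BROADCAST_ENTER below, while C1 keeps the local
 * APIC timer running.
 */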
/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device *device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device *device)
{
	acpi_idle_suspend = 0;
	return 0;
}

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return 0;

		/*FALL THROUGH*/
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
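
/*
 * For reference (paraphrased from the ACPI specification, not from
 * this file): _CST returns a package of the form
 *
 *	Package {
 *		Count,			// number of C-state sub-packages
 *		Package {		// one entry per C-state
 *			Register,	// buffer: I/O port or FFH
 *			Type,		// 1 = C1, 2 = C2, 3 = C3
 *			Latency,	// worst-case latency, us
 *			Power		// average power, mW
 *		},
 *		...
 *	}
 *
 * which is the layout the parser below walks element by element.
 */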
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;

	return;
}
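
/*
 * Example (illustrative numbers, not from this file): a _CST C2 entry
 * advertising a 120us worst-case latency exceeds
 * ACPI_PROCESSOR_MAX_C2_LATENCY (100us), so it stays !valid and never
 * reaches the cpuidle state table built further below.
 */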
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for C3 to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;
	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d: ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

end:
	return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
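
/*
 * Sample of the resulting /proc 'power' file (hypothetical values;
 * layout as emitted by acpi_processor_power_seq_show() above, trailing
 * fields abbreviated):
 *
 *	active state:            C0
 *	max_cstate:              C8
 *	bus master activity:     00000000
 *	maximum allowed latency: 2000000000 usec
 *	states:
 *	    C1: type[C1] promotion[--] demotion[--] latency[001] ...
 *	    C2: type[C2] promotion[--] demotion[--] latency[100] ...
 */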

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * The caller disables interrupts before the call and enables them after
 * return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	ktime_t kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	local_irq_enable();
	cx->usage++;

	return idle_time;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = us_to_pm_timer_ticks(idle_time);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return idle_time;
}

static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
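
/*
 * Summary of the C3 bus-mastering protocol implemented below (added
 * for orientation; the authoritative logic is acpi_idle_enter_bm()
 * itself):
 *
 *	bm_check && bm_control:	 count CPUs under c3_lock and set
 *				 ARB_DIS only once every online CPU is
 *				 in C3, clearing it again on exit
 *	bm_check && !bm_control: enter C3 without touching the arbiter
 *	!bm_check:		 flush the cache (WBINVD) before entry
 */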
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control indicates whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, so we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = us_to_pm_timer_ticks(idle_time);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return idle_time;
}

struct cpuidle_driver acpi_idle_driver = {
	.name = "acpi_idle",
	.owner = THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
				acpi_idle_enter_bm :
				acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
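
/*
 * Illustration (hypothetical latencies, default latency_factor == 2):
 * a valid C1/C2/C3 set reporting 1/50/300 us produces cpuidle states
 * with
 *
 *	C1: exit_latency   1, target_residency   2, enter = acpi_idle_enter_c1
 *	C2: exit_latency  50, target_residency 100, enter = acpi_idle_enter_simple
 *	C3: exit_latency 300, target_residency 600, enter = acpi_idle_enter_bm
 *					(when pr->flags.bm_check is set)
 */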
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In that case C2/C3 is meaningless, so max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

	cpuidle_unregister_device(&pr->power.dev);
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

	return 0;
}