1 /* 2 * processor_idle - idle state submodule to the ACPI processor driver 3 * 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 * - Added processor hotplug support 9 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 10 * - Added support for C3 on SMP 11 * 12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or (at 17 * your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, but 20 * WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 22 * General Public License for more details. 23 * 24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 */ 26 #define pr_fmt(fmt) "ACPI: " fmt 27 28 #include <linux/module.h> 29 #include <linux/acpi.h> 30 #include <linux/dmi.h> 31 #include <linux/sched.h> /* need_resched() */ 32 #include <linux/tick.h> 33 #include <linux/cpuidle.h> 34 #include <linux/cpu.h> 35 #include <acpi/processor.h> 36 37 /* 38 * Include the apic definitions for x86 to have the APIC timer related defines 39 * available also for UP (on SMP it gets magically included via linux/smp.h). 40 * asm/acpi.h is not an option, as it would require more include magic. Also 41 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 42 */ 43 #ifdef CONFIG_X86 44 #include <asm/apic.h> 45 #endif 46 47 #define ACPI_PROCESSOR_CLASS "processor" 48 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 49 ACPI_MODULE_NAME("processor_idle"); 50 51 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 52 module_param(max_cstate, uint, 0000); 53 static unsigned int nocst __read_mostly; 54 module_param(nocst, uint, 0000); 55 static int bm_check_disable __read_mostly; 56 module_param(bm_check_disable, uint, 0000); 57 58 static unsigned int latency_factor __read_mostly = 2; 59 module_param(latency_factor, uint, 0644); 60 61 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 62 63 struct cpuidle_driver acpi_idle_driver = { 64 .name = "acpi_idle", 65 .owner = THIS_MODULE, 66 }; 67 68 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE 69 static 70 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate); 71 72 static int disabled_by_idle_boot_param(void) 73 { 74 return boot_option_idle_override == IDLE_POLL || 75 boot_option_idle_override == IDLE_HALT; 76 } 77 78 /* 79 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 80 * For now disable this. Probably a bug somewhere else. 81 * 82 * To skip this limit, boot/load with a large max_cstate limit. 83 */ 84 static int set_max_cstate(const struct dmi_system_id *id) 85 { 86 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 87 return 0; 88 89 pr_notice("%s detected - limiting to C%ld max_cstate." 90 " Override with \"processor.max_cstate=%d\"\n", id->ident, 91 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 92 93 max_cstate = (long)id->driver_data; 94 95 return 0; 96 } 97 98 static const struct dmi_system_id processor_power_dmi_table[] = { 99 { set_max_cstate, "Clevo 5600D", { 100 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 101 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 102 (void *)2}, 103 { set_max_cstate, "Pavilion zv5000", { 104 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 105 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, 106 (void *)1}, 107 { set_max_cstate, "Asus L8400B", { 108 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 109 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, 110 (void *)1}, 111 {}, 112 }; 113 114 115 /* 116 * Callers should disable interrupts before the call and enable 117 * interrupts after return. 118 */ 119 static void __cpuidle acpi_safe_halt(void) 120 { 121 if (!tif_need_resched()) { 122 safe_halt(); 123 local_irq_disable(); 124 } 125 } 126 127 #ifdef ARCH_APICTIMER_STOPS_ON_C3 128 129 /* 130 * Some BIOS implementations switch to C3 in the published C2 state. 131 * This seems to be a common problem on AMD boxen, but other vendors 132 * are affected too. We pick the most conservative approach: we assume 133 * that the local APIC stops in both C2 and C3. 134 */ 135 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 136 struct acpi_processor_cx *cx) 137 { 138 struct acpi_processor_power *pwr = &pr->power; 139 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; 140 141 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) 142 return; 143 144 if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) 145 type = ACPI_STATE_C1; 146 147 /* 148 * Check, if one of the previous states already marked the lapic 149 * unstable 150 */ 151 if (pwr->timer_broadcast_on_state < state) 152 return; 153 154 if (cx->type >= type) 155 pr->power.timer_broadcast_on_state = state; 156 } 157 158 static void __lapic_timer_propagate_broadcast(void *arg) 159 { 160 struct acpi_processor *pr = (struct acpi_processor *) arg; 161 162 if (pr->power.timer_broadcast_on_state < INT_MAX) 163 tick_broadcast_enable(); 164 else 165 tick_broadcast_disable(); 166 } 167 168 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) 169 { 170 smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast, 171 (void *)pr, 1); 172 } 173 174 /* Power(C) State timer broadcast control */ 175 static void lapic_timer_state_broadcast(struct acpi_processor *pr, 176 struct acpi_processor_cx *cx, 177 int broadcast) 178 { 179 int state = cx - pr->power.states; 180 181 if (state >= pr->power.timer_broadcast_on_state) { 182 if (broadcast) 183 tick_broadcast_enter(); 184 else 185 tick_broadcast_exit(); 186 } 187 } 188 189 #else 190 191 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 192 struct acpi_processor_cx *cstate) { } 193 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } 194 static void lapic_timer_state_broadcast(struct acpi_processor *pr, 195 struct acpi_processor_cx *cx, 196 int broadcast) 197 { 198 } 199 200 #endif 201 202 #if defined(CONFIG_X86) 203 static void tsc_check_state(int state) 204 { 205 switch (boot_cpu_data.x86_vendor) { 206 case X86_VENDOR_AMD: 207 case X86_VENDOR_INTEL: 208 /* 209 * AMD Fam10h TSC will tick in all 210 * C/P/S0/S1 states when this bit is set. 211 */ 212 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 213 return; 214 215 /*FALL THROUGH*/ 216 default: 217 /* TSC could halt in idle, so notify users */ 218 if (state > ACPI_STATE_C1) 219 mark_tsc_unstable("TSC halts in idle"); 220 } 221 } 222 #else 223 static void tsc_check_state(int state) { return; } 224 #endif 225 226 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 227 { 228 229 if (!pr->pblk) 230 return -ENODEV; 231 232 /* if info is obtained from pblk/fadt, type equals state */ 233 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 234 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 235 236 #ifndef CONFIG_HOTPLUG_CPU 237 /* 238 * Check for P_LVL2_UP flag before entering C2 and above on 239 * an SMP system. 240 */ 241 if ((num_online_cpus() > 1) && 242 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 243 return -ENODEV; 244 #endif 245 246 /* determine C2 and C3 address from pblk */ 247 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 248 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 249 250 /* determine latencies from FADT */ 251 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency; 252 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency; 253 254 /* 255 * FADT specified C2 latency must be less than or equal to 256 * 100 microseconds. 257 */ 258 if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 259 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 260 "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency)); 261 /* invalidate C2 */ 262 pr->power.states[ACPI_STATE_C2].address = 0; 263 } 264 265 /* 266 * FADT supplied C3 latency must be less than or equal to 267 * 1000 microseconds. 268 */ 269 if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 270 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 271 "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency)); 272 /* invalidate C3 */ 273 pr->power.states[ACPI_STATE_C3].address = 0; 274 } 275 276 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 277 "lvl2[0x%08x] lvl3[0x%08x]\n", 278 pr->power.states[ACPI_STATE_C2].address, 279 pr->power.states[ACPI_STATE_C3].address)); 280 281 return 0; 282 } 283 284 static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 285 { 286 if (!pr->power.states[ACPI_STATE_C1].valid) { 287 /* set the first C-State to C1 */ 288 /* all processors need to support C1 */ 289 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 290 pr->power.states[ACPI_STATE_C1].valid = 1; 291 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; 292 } 293 /* the C0 state only exists as a filler in our array */ 294 pr->power.states[ACPI_STATE_C0].valid = 1; 295 return 0; 296 } 297 298 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 299 { 300 acpi_status status; 301 u64 count; 302 int current_count; 303 int i, ret = 0; 304 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 305 union acpi_object *cst; 306 307 if (nocst) 308 return -ENODEV; 309 310 current_count = 0; 311 312 status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 313 if (ACPI_FAILURE(status)) { 314 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); 315 return -ENODEV; 316 } 317 318 cst = buffer.pointer; 319 320 /* There must be at least 2 elements */ 321 if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { 322 pr_err("not enough elements in _CST\n"); 323 ret = -EFAULT; 324 goto end; 325 } 326 327 count = cst->package.elements[0].integer.value; 328 329 /* Validate number of power states. */ 330 if (count < 1 || count != cst->package.count - 1) { 331 pr_err("count given by _CST is not valid\n"); 332 ret = -EFAULT; 333 goto end; 334 } 335 336 /* Tell driver that at least _CST is supported. */ 337 pr->flags.has_cst = 1; 338 339 for (i = 1; i <= count; i++) { 340 union acpi_object *element; 341 union acpi_object *obj; 342 struct acpi_power_register *reg; 343 struct acpi_processor_cx cx; 344 345 memset(&cx, 0, sizeof(cx)); 346 347 element = &(cst->package.elements[i]); 348 if (element->type != ACPI_TYPE_PACKAGE) 349 continue; 350 351 if (element->package.count != 4) 352 continue; 353 354 obj = &(element->package.elements[0]); 355 356 if (obj->type != ACPI_TYPE_BUFFER) 357 continue; 358 359 reg = (struct acpi_power_register *)obj->buffer.pointer; 360 361 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 362 (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) 363 continue; 364 365 /* There should be an easy way to extract an integer... */ 366 obj = &(element->package.elements[1]); 367 if (obj->type != ACPI_TYPE_INTEGER) 368 continue; 369 370 cx.type = obj->integer.value; 371 /* 372 * Some buggy BIOSes won't list C1 in _CST - 373 * Let acpi_processor_get_power_info_default() handle them later 374 */ 375 if (i == 1 && cx.type != ACPI_STATE_C1) 376 current_count++; 377 378 cx.address = reg->address; 379 cx.index = current_count + 1; 380 381 cx.entry_method = ACPI_CSTATE_SYSTEMIO; 382 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 383 if (acpi_processor_ffh_cstate_probe 384 (pr->id, &cx, reg) == 0) { 385 cx.entry_method = ACPI_CSTATE_FFH; 386 } else if (cx.type == ACPI_STATE_C1) { 387 /* 388 * C1 is a special case where FIXED_HARDWARE 389 * can be handled in non-MWAIT way as well. 390 * In that case, save this _CST entry info. 391 * Otherwise, ignore this info and continue. 392 */ 393 cx.entry_method = ACPI_CSTATE_HALT; 394 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 395 } else { 396 continue; 397 } 398 if (cx.type == ACPI_STATE_C1 && 399 (boot_option_idle_override == IDLE_NOMWAIT)) { 400 /* 401 * In most cases the C1 space_id obtained from 402 * _CST object is FIXED_HARDWARE access mode. 403 * But when the option of idle=halt is added, 404 * the entry_method type should be changed from 405 * CSTATE_FFH to CSTATE_HALT. 406 * When the option of idle=nomwait is added, 407 * the C1 entry_method type should be 408 * CSTATE_HALT. 409 */ 410 cx.entry_method = ACPI_CSTATE_HALT; 411 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 412 } 413 } else { 414 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", 415 cx.address); 416 } 417 418 if (cx.type == ACPI_STATE_C1) { 419 cx.valid = 1; 420 } 421 422 obj = &(element->package.elements[2]); 423 if (obj->type != ACPI_TYPE_INTEGER) 424 continue; 425 426 cx.latency = obj->integer.value; 427 428 obj = &(element->package.elements[3]); 429 if (obj->type != ACPI_TYPE_INTEGER) 430 continue; 431 432 current_count++; 433 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); 434 435 /* 436 * We support total ACPI_PROCESSOR_MAX_POWER - 1 437 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) 438 */ 439 if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { 440 pr_warn("Limiting number of power states to max (%d)\n", 441 ACPI_PROCESSOR_MAX_POWER); 442 pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 443 break; 444 } 445 } 446 447 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", 448 current_count)); 449 450 /* Validate number of power states discovered */ 451 if (current_count < 2) 452 ret = -EFAULT; 453 454 end: 455 kfree(buffer.pointer); 456 457 return ret; 458 } 459 460 static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 461 struct acpi_processor_cx *cx) 462 { 463 static int bm_check_flag = -1; 464 static int bm_control_flag = -1; 465 466 467 if (!cx->address) 468 return; 469 470 /* 471 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 472 * DMA transfers are used by any ISA device to avoid livelock. 473 * Note that we could disable Type-F DMA (as recommended by 474 * the erratum), but this is known to disrupt certain ISA 475 * devices thus we take the conservative approach. 476 */ 477 else if (errata.piix4.fdma) { 478 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 479 "C3 not supported on PIIX4 with Type-F DMA\n")); 480 return; 481 } 482 483 /* All the logic here assumes flags.bm_check is same across all CPUs */ 484 if (bm_check_flag == -1) { 485 /* Determine whether bm_check is needed based on CPU */ 486 acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 487 bm_check_flag = pr->flags.bm_check; 488 bm_control_flag = pr->flags.bm_control; 489 } else { 490 pr->flags.bm_check = bm_check_flag; 491 pr->flags.bm_control = bm_control_flag; 492 } 493 494 if (pr->flags.bm_check) { 495 if (!pr->flags.bm_control) { 496 if (pr->flags.has_cst != 1) { 497 /* bus mastering control is necessary */ 498 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 499 "C3 support requires BM control\n")); 500 return; 501 } else { 502 /* Here we enter C3 without bus mastering */ 503 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 504 "C3 support without BM control\n")); 505 } 506 } 507 } else { 508 /* 509 * WBINVD should be set in fadt, for C3 state to be 510 * supported on when bm_check is not required. 511 */ 512 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { 513 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 514 "Cache invalidation should work properly" 515 " for C3 to be enabled on SMP systems\n")); 516 return; 517 } 518 } 519 520 /* 521 * Otherwise we've met all of our C3 requirements. 522 * Normalize the C3 latency to expidite policy. Enable 523 * checking of bus mastering status (bm_check) so we can 524 * use this in our C3 policy 525 */ 526 cx->valid = 1; 527 528 /* 529 * On older chipsets, BM_RLD needs to be set 530 * in order for Bus Master activity to wake the 531 * system from C3. Newer chipsets handle DMA 532 * during C3 automatically and BM_RLD is a NOP. 533 * In either case, the proper way to 534 * handle BM_RLD is to set it and leave it set. 535 */ 536 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 537 538 return; 539 } 540 541 static int acpi_processor_power_verify(struct acpi_processor *pr) 542 { 543 unsigned int i; 544 unsigned int working = 0; 545 546 pr->power.timer_broadcast_on_state = INT_MAX; 547 548 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 549 struct acpi_processor_cx *cx = &pr->power.states[i]; 550 551 switch (cx->type) { 552 case ACPI_STATE_C1: 553 cx->valid = 1; 554 break; 555 556 case ACPI_STATE_C2: 557 if (!cx->address) 558 break; 559 cx->valid = 1; 560 break; 561 562 case ACPI_STATE_C3: 563 acpi_processor_power_verify_c3(pr, cx); 564 break; 565 } 566 if (!cx->valid) 567 continue; 568 569 lapic_timer_check_state(i, pr, cx); 570 tsc_check_state(cx->type); 571 working++; 572 } 573 574 lapic_timer_propagate_broadcast(pr); 575 576 return (working); 577 } 578 579 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 580 { 581 unsigned int i; 582 int result; 583 584 585 /* NOTE: the idle thread may not be running while calling 586 * this function */ 587 588 /* Zero initialize all the C-states info. */ 589 memset(pr->power.states, 0, sizeof(pr->power.states)); 590 591 result = acpi_processor_get_power_info_cst(pr); 592 if (result == -ENODEV) 593 result = acpi_processor_get_power_info_fadt(pr); 594 595 if (result) 596 return result; 597 598 acpi_processor_get_power_info_default(pr); 599 600 pr->power.count = acpi_processor_power_verify(pr); 601 602 /* 603 * if one state of type C2 or C3 is available, mark this 604 * CPU as being "idle manageable" 605 */ 606 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 607 if (pr->power.states[i].valid) { 608 pr->power.count = i; 609 if (pr->power.states[i].type >= ACPI_STATE_C2) 610 pr->flags.power = 1; 611 } 612 } 613 614 return 0; 615 } 616 617 /** 618 * acpi_idle_bm_check - checks if bus master activity was detected 619 */ 620 static int acpi_idle_bm_check(void) 621 { 622 u32 bm_status = 0; 623 624 if (bm_check_disable) 625 return 0; 626 627 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 628 if (bm_status) 629 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 630 /* 631 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 632 * the true state of bus mastering activity; forcing us to 633 * manually check the BMIDEA bit of each IDE channel. 634 */ 635 else if (errata.piix4.bmisx) { 636 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 637 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 638 bm_status = 1; 639 } 640 return bm_status; 641 } 642 643 /** 644 * acpi_idle_do_entry - enter idle state using the appropriate method 645 * @cx: cstate data 646 * 647 * Caller disables interrupt before call and enables interrupt after return. 648 */ 649 static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) 650 { 651 if (cx->entry_method == ACPI_CSTATE_FFH) { 652 /* Call into architectural FFH based C-state */ 653 acpi_processor_ffh_cstate_enter(cx); 654 } else if (cx->entry_method == ACPI_CSTATE_HALT) { 655 acpi_safe_halt(); 656 } else { 657 /* IO port based C-state */ 658 inb(cx->address); 659 /* Dummy wait op - must do something useless after P_LVL2 read 660 because chipsets cannot guarantee that STPCLK# signal 661 gets asserted in time to freeze execution properly. */ 662 inl(acpi_gbl_FADT.xpm_timer_block.address); 663 } 664 } 665 666 /** 667 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) 668 * @dev: the target CPU 669 * @index: the index of suggested state 670 */ 671 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) 672 { 673 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 674 675 ACPI_FLUSH_CPU_CACHE(); 676 677 while (1) { 678 679 if (cx->entry_method == ACPI_CSTATE_HALT) 680 safe_halt(); 681 else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { 682 inb(cx->address); 683 /* See comment in acpi_idle_do_entry() */ 684 inl(acpi_gbl_FADT.xpm_timer_block.address); 685 } else 686 return -ENODEV; 687 } 688 689 /* Never reached */ 690 return 0; 691 } 692 693 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) 694 { 695 return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst && 696 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED); 697 } 698 699 static int c3_cpu_count; 700 static DEFINE_RAW_SPINLOCK(c3_lock); 701 702 /** 703 * acpi_idle_enter_bm - enters C3 with proper BM handling 704 * @pr: Target processor 705 * @cx: Target state context 706 * @timer_bc: Whether or not to change timer mode to broadcast 707 */ 708 static void acpi_idle_enter_bm(struct acpi_processor *pr, 709 struct acpi_processor_cx *cx, bool timer_bc) 710 { 711 acpi_unlazy_tlb(smp_processor_id()); 712 713 /* 714 * Must be done before busmaster disable as we might need to 715 * access HPET ! 716 */ 717 if (timer_bc) 718 lapic_timer_state_broadcast(pr, cx, 1); 719 720 /* 721 * disable bus master 722 * bm_check implies we need ARB_DIS 723 * bm_control implies whether we can do ARB_DIS 724 * 725 * That leaves a case where bm_check is set and bm_control is 726 * not set. In that case we cannot do much, we enter C3 727 * without doing anything. 728 */ 729 if (pr->flags.bm_control) { 730 raw_spin_lock(&c3_lock); 731 c3_cpu_count++; 732 /* Disable bus master arbitration when all CPUs are in C3 */ 733 if (c3_cpu_count == num_online_cpus()) 734 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); 735 raw_spin_unlock(&c3_lock); 736 } 737 738 acpi_idle_do_entry(cx); 739 740 /* Re-enable bus master arbitration */ 741 if (pr->flags.bm_control) { 742 raw_spin_lock(&c3_lock); 743 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); 744 c3_cpu_count--; 745 raw_spin_unlock(&c3_lock); 746 } 747 748 if (timer_bc) 749 lapic_timer_state_broadcast(pr, cx, 0); 750 } 751 752 static int acpi_idle_enter(struct cpuidle_device *dev, 753 struct cpuidle_driver *drv, int index) 754 { 755 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 756 struct acpi_processor *pr; 757 758 pr = __this_cpu_read(processors); 759 if (unlikely(!pr)) 760 return -EINVAL; 761 762 if (cx->type != ACPI_STATE_C1) { 763 if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { 764 index = CPUIDLE_DRIVER_STATE_START; 765 cx = per_cpu(acpi_cstate[index], dev->cpu); 766 } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { 767 if (cx->bm_sts_skip || !acpi_idle_bm_check()) { 768 acpi_idle_enter_bm(pr, cx, true); 769 return index; 770 } else if (drv->safe_state_index >= 0) { 771 index = drv->safe_state_index; 772 cx = per_cpu(acpi_cstate[index], dev->cpu); 773 } else { 774 acpi_safe_halt(); 775 return -EBUSY; 776 } 777 } 778 } 779 780 lapic_timer_state_broadcast(pr, cx, 1); 781 782 if (cx->type == ACPI_STATE_C3) 783 ACPI_FLUSH_CPU_CACHE(); 784 785 acpi_idle_do_entry(cx); 786 787 lapic_timer_state_broadcast(pr, cx, 0); 788 789 return index; 790 } 791 792 static void acpi_idle_enter_freeze(struct cpuidle_device *dev, 793 struct cpuidle_driver *drv, int index) 794 { 795 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 796 797 if (cx->type == ACPI_STATE_C3) { 798 struct acpi_processor *pr = __this_cpu_read(processors); 799 800 if (unlikely(!pr)) 801 return; 802 803 if (pr->flags.bm_check) { 804 acpi_idle_enter_bm(pr, cx, false); 805 return; 806 } else { 807 ACPI_FLUSH_CPU_CACHE(); 808 } 809 } 810 acpi_idle_do_entry(cx); 811 } 812 813 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 814 struct cpuidle_device *dev) 815 { 816 int i, count = CPUIDLE_DRIVER_STATE_START; 817 struct acpi_processor_cx *cx; 818 819 if (max_cstate == 0) 820 max_cstate = 1; 821 822 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 823 cx = &pr->power.states[i]; 824 825 if (!cx->valid) 826 continue; 827 828 per_cpu(acpi_cstate[count], dev->cpu) = cx; 829 830 count++; 831 if (count == CPUIDLE_STATE_MAX) 832 break; 833 } 834 835 if (!count) 836 return -EINVAL; 837 838 return 0; 839 } 840 841 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 842 { 843 int i, count = CPUIDLE_DRIVER_STATE_START; 844 struct acpi_processor_cx *cx; 845 struct cpuidle_state *state; 846 struct cpuidle_driver *drv = &acpi_idle_driver; 847 848 if (max_cstate == 0) 849 max_cstate = 1; 850 851 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 852 cx = &pr->power.states[i]; 853 854 if (!cx->valid) 855 continue; 856 857 state = &drv->states[count]; 858 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 859 strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 860 state->exit_latency = cx->latency; 861 state->target_residency = cx->latency * latency_factor; 862 state->enter = acpi_idle_enter; 863 864 state->flags = 0; 865 if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) { 866 state->enter_dead = acpi_idle_play_dead; 867 drv->safe_state_index = count; 868 } 869 /* 870 * Halt-induced C1 is not good for ->enter_freeze, because it 871 * re-enables interrupts on exit. Moreover, C1 is generally not 872 * particularly interesting from the suspend-to-idle angle, so 873 * avoid C1 and the situations in which we may need to fall back 874 * to it altogether. 875 */ 876 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) 877 state->enter_freeze = acpi_idle_enter_freeze; 878 879 count++; 880 if (count == CPUIDLE_STATE_MAX) 881 break; 882 } 883 884 drv->state_count = count; 885 886 if (!count) 887 return -EINVAL; 888 889 return 0; 890 } 891 892 static inline void acpi_processor_cstate_first_run_checks(void) 893 { 894 acpi_status status; 895 static int first_run; 896 897 if (first_run) 898 return; 899 dmi_check_system(processor_power_dmi_table); 900 max_cstate = acpi_processor_cstate_check(max_cstate); 901 if (max_cstate < ACPI_C_STATES_MAX) 902 pr_notice("ACPI: processor limited to max C-state %d\n", 903 max_cstate); 904 first_run++; 905 906 if (acpi_gbl_FADT.cst_control && !nocst) { 907 status = acpi_os_write_port(acpi_gbl_FADT.smi_command, 908 acpi_gbl_FADT.cst_control, 8); 909 if (ACPI_FAILURE(status)) 910 ACPI_EXCEPTION((AE_INFO, status, 911 "Notifying BIOS of _CST ability failed")); 912 } 913 } 914 #else 915 916 static inline int disabled_by_idle_boot_param(void) { return 0; } 917 static inline void acpi_processor_cstate_first_run_checks(void) { } 918 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 919 { 920 return -ENODEV; 921 } 922 923 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 924 struct cpuidle_device *dev) 925 { 926 return -EINVAL; 927 } 928 929 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 930 { 931 return -EINVAL; 932 } 933 934 #endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ 935 936 struct acpi_lpi_states_array { 937 unsigned int size; 938 unsigned int composite_states_size; 939 struct acpi_lpi_state *entries; 940 struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER]; 941 }; 942 943 static int obj_get_integer(union acpi_object *obj, u32 *value) 944 { 945 if (obj->type != ACPI_TYPE_INTEGER) 946 return -EINVAL; 947 948 *value = obj->integer.value; 949 return 0; 950 } 951 952 static int acpi_processor_evaluate_lpi(acpi_handle handle, 953 struct acpi_lpi_states_array *info) 954 { 955 acpi_status status; 956 int ret = 0; 957 int pkg_count, state_idx = 1, loop; 958 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 959 union acpi_object *lpi_data; 960 struct acpi_lpi_state *lpi_state; 961 962 status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer); 963 if (ACPI_FAILURE(status)) { 964 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n")); 965 return -ENODEV; 966 } 967 968 lpi_data = buffer.pointer; 969 970 /* There must be at least 4 elements = 3 elements + 1 package */ 971 if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE || 972 lpi_data->package.count < 4) { 973 pr_debug("not enough elements in _LPI\n"); 974 ret = -ENODATA; 975 goto end; 976 } 977 978 pkg_count = lpi_data->package.elements[2].integer.value; 979 980 /* Validate number of power states. */ 981 if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) { 982 pr_debug("count given by _LPI is not valid\n"); 983 ret = -ENODATA; 984 goto end; 985 } 986 987 lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL); 988 if (!lpi_state) { 989 ret = -ENOMEM; 990 goto end; 991 } 992 993 info->size = pkg_count; 994 info->entries = lpi_state; 995 996 /* LPI States start at index 3 */ 997 for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) { 998 union acpi_object *element, *pkg_elem, *obj; 999 1000 element = &lpi_data->package.elements[loop]; 1001 if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7) 1002 continue; 1003 1004 pkg_elem = element->package.elements; 1005 1006 obj = pkg_elem + 6; 1007 if (obj->type == ACPI_TYPE_BUFFER) { 1008 struct acpi_power_register *reg; 1009 1010 reg = (struct acpi_power_register *)obj->buffer.pointer; 1011 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 1012 reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) 1013 continue; 1014 1015 lpi_state->address = reg->address; 1016 lpi_state->entry_method = 1017 reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ? 1018 ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO; 1019 } else if (obj->type == ACPI_TYPE_INTEGER) { 1020 lpi_state->entry_method = ACPI_CSTATE_INTEGER; 1021 lpi_state->address = obj->integer.value; 1022 } else { 1023 continue; 1024 } 1025 1026 /* elements[7,8] skipped for now i.e. Residency/Usage counter*/ 1027 1028 obj = pkg_elem + 9; 1029 if (obj->type == ACPI_TYPE_STRING) 1030 strlcpy(lpi_state->desc, obj->string.pointer, 1031 ACPI_CX_DESC_LEN); 1032 1033 lpi_state->index = state_idx; 1034 if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) { 1035 pr_debug("No min. residency found, assuming 10 us\n"); 1036 lpi_state->min_residency = 10; 1037 } 1038 1039 if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) { 1040 pr_debug("No wakeup residency found, assuming 10 us\n"); 1041 lpi_state->wake_latency = 10; 1042 } 1043 1044 if (obj_get_integer(pkg_elem + 2, &lpi_state->flags)) 1045 lpi_state->flags = 0; 1046 1047 if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags)) 1048 lpi_state->arch_flags = 0; 1049 1050 if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq)) 1051 lpi_state->res_cnt_freq = 1; 1052 1053 if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state)) 1054 lpi_state->enable_parent_state = 0; 1055 } 1056 1057 acpi_handle_debug(handle, "Found %d power states\n", state_idx); 1058 end: 1059 kfree(buffer.pointer); 1060 return ret; 1061 } 1062 1063 /* 1064 * flat_state_cnt - the number of composite LPI states after the process of flattening 1065 */ 1066 static int flat_state_cnt; 1067 1068 /** 1069 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state 1070 * 1071 * @local: local LPI state 1072 * @parent: parent LPI state 1073 * @result: composite LPI state 1074 */ 1075 static bool combine_lpi_states(struct acpi_lpi_state *local, 1076 struct acpi_lpi_state *parent, 1077 struct acpi_lpi_state *result) 1078 { 1079 if (parent->entry_method == ACPI_CSTATE_INTEGER) { 1080 if (!parent->address) /* 0 means autopromotable */ 1081 return false; 1082 result->address = local->address + parent->address; 1083 } else { 1084 result->address = parent->address; 1085 } 1086 1087 result->min_residency = max(local->min_residency, parent->min_residency); 1088 result->wake_latency = local->wake_latency + parent->wake_latency; 1089 result->enable_parent_state = parent->enable_parent_state; 1090 result->entry_method = local->entry_method; 1091 1092 result->flags = parent->flags; 1093 result->arch_flags = parent->arch_flags; 1094 result->index = parent->index; 1095 1096 strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN); 1097 strlcat(result->desc, "+", ACPI_CX_DESC_LEN); 1098 strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); 1099 return true; 1100 } 1101 1102 #define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0) 1103 1104 static void stash_composite_state(struct acpi_lpi_states_array *curr_level, 1105 struct acpi_lpi_state *t) 1106 { 1107 curr_level->composite_states[curr_level->composite_states_size++] = t; 1108 } 1109 1110 static int flatten_lpi_states(struct acpi_processor *pr, 1111 struct acpi_lpi_states_array *curr_level, 1112 struct acpi_lpi_states_array *prev_level) 1113 { 1114 int i, j, state_count = curr_level->size; 1115 struct acpi_lpi_state *p, *t = curr_level->entries; 1116 1117 curr_level->composite_states_size = 0; 1118 for (j = 0; j < state_count; j++, t++) { 1119 struct acpi_lpi_state *flpi; 1120 1121 if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED)) 1122 continue; 1123 1124 if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) { 1125 pr_warn("Limiting number of LPI states to max (%d)\n", 1126 ACPI_PROCESSOR_MAX_POWER); 1127 pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 1128 break; 1129 } 1130 1131 flpi = &pr->power.lpi_states[flat_state_cnt]; 1132 1133 if (!prev_level) { /* leaf/processor node */ 1134 memcpy(flpi, t, sizeof(*t)); 1135 stash_composite_state(curr_level, flpi); 1136 flat_state_cnt++; 1137 continue; 1138 } 1139 1140 for (i = 0; i < prev_level->composite_states_size; i++) { 1141 p = prev_level->composite_states[i]; 1142 if (t->index <= p->enable_parent_state && 1143 combine_lpi_states(p, t, flpi)) { 1144 stash_composite_state(curr_level, flpi); 1145 flat_state_cnt++; 1146 flpi++; 1147 } 1148 } 1149 } 1150 1151 kfree(curr_level->entries); 1152 return 0; 1153 } 1154 1155 static int acpi_processor_get_lpi_info(struct acpi_processor *pr) 1156 { 1157 int ret, i; 1158 acpi_status status; 1159 acpi_handle handle = pr->handle, pr_ahandle; 1160 struct acpi_device *d = NULL; 1161 struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; 1162 1163 if (!osc_pc_lpi_support_confirmed) 1164 return -EOPNOTSUPP; 1165 1166 if (!acpi_has_method(handle, "_LPI")) 1167 return -EINVAL; 1168 1169 flat_state_cnt = 0; 1170 prev = &info[0]; 1171 curr = &info[1]; 1172 handle = pr->handle; 1173 ret = acpi_processor_evaluate_lpi(handle, prev); 1174 if (ret) 1175 return ret; 1176 flatten_lpi_states(pr, prev, NULL); 1177 1178 status = acpi_get_parent(handle, &pr_ahandle); 1179 while (ACPI_SUCCESS(status)) { 1180 acpi_bus_get_device(pr_ahandle, &d); 1181 handle = pr_ahandle; 1182 1183 if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID)) 1184 break; 1185 1186 /* can be optional ? */ 1187 if (!acpi_has_method(handle, "_LPI")) 1188 break; 1189 1190 ret = acpi_processor_evaluate_lpi(handle, curr); 1191 if (ret) 1192 break; 1193 1194 /* flatten all the LPI states in this level of hierarchy */ 1195 flatten_lpi_states(pr, curr, prev); 1196 1197 tmp = prev, prev = curr, curr = tmp; 1198 1199 status = acpi_get_parent(handle, &pr_ahandle); 1200 } 1201 1202 pr->power.count = flat_state_cnt; 1203 /* reset the index after flattening */ 1204 for (i = 0; i < pr->power.count; i++) 1205 pr->power.lpi_states[i].index = i; 1206 1207 /* Tell driver that _LPI is supported. */ 1208 pr->flags.has_lpi = 1; 1209 pr->flags.power = 1; 1210 1211 return 0; 1212 } 1213 1214 int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) 1215 { 1216 return -ENODEV; 1217 } 1218 1219 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) 1220 { 1221 return -ENODEV; 1222 } 1223 1224 /** 1225 * acpi_idle_lpi_enter - enters an ACPI any LPI state 1226 * @dev: the target CPU 1227 * @drv: cpuidle driver containing cpuidle state info 1228 * @index: index of target state 1229 * 1230 * Return: 0 for success or negative value for error 1231 */ 1232 static int acpi_idle_lpi_enter(struct cpuidle_device *dev, 1233 struct cpuidle_driver *drv, int index) 1234 { 1235 struct acpi_processor *pr; 1236 struct acpi_lpi_state *lpi; 1237 1238 pr = __this_cpu_read(processors); 1239 1240 if (unlikely(!pr)) 1241 return -EINVAL; 1242 1243 lpi = &pr->power.lpi_states[index]; 1244 if (lpi->entry_method == ACPI_CSTATE_FFH) 1245 return acpi_processor_ffh_lpi_enter(lpi); 1246 1247 return -EINVAL; 1248 } 1249 1250 static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) 1251 { 1252 int i; 1253 struct acpi_lpi_state *lpi; 1254 struct cpuidle_state *state; 1255 struct cpuidle_driver *drv = &acpi_idle_driver; 1256 1257 if (!pr->flags.has_lpi) 1258 return -EOPNOTSUPP; 1259 1260 for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) { 1261 lpi = &pr->power.lpi_states[i]; 1262 1263 state = &drv->states[i]; 1264 snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); 1265 strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); 1266 state->exit_latency = lpi->wake_latency; 1267 state->target_residency = lpi->min_residency; 1268 if (lpi->arch_flags) 1269 state->flags |= CPUIDLE_FLAG_TIMER_STOP; 1270 state->enter = acpi_idle_lpi_enter; 1271 drv->safe_state_index = i; 1272 } 1273 1274 drv->state_count = i; 1275 1276 return 0; 1277 } 1278 1279 /** 1280 * acpi_processor_setup_cpuidle_states- prepares and configures cpuidle 1281 * global state data i.e. idle routines 1282 * 1283 * @pr: the ACPI processor 1284 */ 1285 static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) 1286 { 1287 int i; 1288 struct cpuidle_driver *drv = &acpi_idle_driver; 1289 1290 if (!pr->flags.power_setup_done || !pr->flags.power) 1291 return -EINVAL; 1292 1293 drv->safe_state_index = -1; 1294 for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) { 1295 drv->states[i].name[0] = '\0'; 1296 drv->states[i].desc[0] = '\0'; 1297 } 1298 1299 if (pr->flags.has_lpi) 1300 return acpi_processor_setup_lpi_states(pr); 1301 1302 return acpi_processor_setup_cstates(pr); 1303 } 1304 1305 /** 1306 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE 1307 * device i.e. per-cpu data 1308 * 1309 * @pr: the ACPI processor 1310 * @dev : the cpuidle device 1311 */ 1312 static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr, 1313 struct cpuidle_device *dev) 1314 { 1315 if (!pr->flags.power_setup_done || !pr->flags.power || !dev) 1316 return -EINVAL; 1317 1318 dev->cpu = pr->id; 1319 if (pr->flags.has_lpi) 1320 return acpi_processor_ffh_lpi_probe(pr->id); 1321 1322 return acpi_processor_setup_cpuidle_cx(pr, dev); 1323 } 1324 1325 static int acpi_processor_get_power_info(struct acpi_processor *pr) 1326 { 1327 int ret; 1328 1329 ret = acpi_processor_get_lpi_info(pr); 1330 if (ret) 1331 ret = acpi_processor_get_cstate_info(pr); 1332 1333 return ret; 1334 } 1335 1336 int acpi_processor_hotplug(struct acpi_processor *pr) 1337 { 1338 int ret = 0; 1339 struct cpuidle_device *dev; 1340 1341 if (disabled_by_idle_boot_param()) 1342 return 0; 1343 1344 if (!pr->flags.power_setup_done) 1345 return -ENODEV; 1346 1347 dev = per_cpu(acpi_cpuidle_device, pr->id); 1348 cpuidle_pause_and_lock(); 1349 cpuidle_disable_device(dev); 1350 ret = acpi_processor_get_power_info(pr); 1351 if (!ret && pr->flags.power) { 1352 acpi_processor_setup_cpuidle_dev(pr, dev); 1353 ret = cpuidle_enable_device(dev); 1354 } 1355 cpuidle_resume_and_unlock(); 1356 1357 return ret; 1358 } 1359 1360 int acpi_processor_power_state_has_changed(struct acpi_processor *pr) 1361 { 1362 int cpu; 1363 struct acpi_processor *_pr; 1364 struct cpuidle_device *dev; 1365 1366 if (disabled_by_idle_boot_param()) 1367 return 0; 1368 1369 if (!pr->flags.power_setup_done) 1370 return -ENODEV; 1371 1372 /* 1373 * FIXME: Design the ACPI notification to make it once per 1374 * system instead of once per-cpu. This condition is a hack 1375 * to make the code that updates C-States be called once. 1376 */ 1377 1378 if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { 1379 1380 /* Protect against cpu-hotplug */ 1381 get_online_cpus(); 1382 cpuidle_pause_and_lock(); 1383 1384 /* Disable all cpuidle devices */ 1385 for_each_online_cpu(cpu) { 1386 _pr = per_cpu(processors, cpu); 1387 if (!_pr || !_pr->flags.power_setup_done) 1388 continue; 1389 dev = per_cpu(acpi_cpuidle_device, cpu); 1390 cpuidle_disable_device(dev); 1391 } 1392 1393 /* Populate Updated C-state information */ 1394 acpi_processor_get_power_info(pr); 1395 acpi_processor_setup_cpuidle_states(pr); 1396 1397 /* Enable all cpuidle devices */ 1398 for_each_online_cpu(cpu) { 1399 _pr = per_cpu(processors, cpu); 1400 if (!_pr || !_pr->flags.power_setup_done) 1401 continue; 1402 acpi_processor_get_power_info(_pr); 1403 if (_pr->flags.power) { 1404 dev = per_cpu(acpi_cpuidle_device, cpu); 1405 acpi_processor_setup_cpuidle_dev(_pr, dev); 1406 cpuidle_enable_device(dev); 1407 } 1408 } 1409 cpuidle_resume_and_unlock(); 1410 put_online_cpus(); 1411 } 1412 1413 return 0; 1414 } 1415 1416 static int acpi_processor_registered; 1417 1418 int acpi_processor_power_init(struct acpi_processor *pr) 1419 { 1420 int retval; 1421 struct cpuidle_device *dev; 1422 1423 if (disabled_by_idle_boot_param()) 1424 return 0; 1425 1426 acpi_processor_cstate_first_run_checks(); 1427 1428 if (!acpi_processor_get_power_info(pr)) 1429 pr->flags.power_setup_done = 1; 1430 1431 /* 1432 * Install the idle handler if processor power management is supported. 1433 * Note that we use previously set idle handler will be used on 1434 * platforms that only support C1. 1435 */ 1436 if (pr->flags.power) { 1437 /* Register acpi_idle_driver if not already registered */ 1438 if (!acpi_processor_registered) { 1439 acpi_processor_setup_cpuidle_states(pr); 1440 retval = cpuidle_register_driver(&acpi_idle_driver); 1441 if (retval) 1442 return retval; 1443 pr_debug("%s registered with cpuidle\n", 1444 acpi_idle_driver.name); 1445 } 1446 1447 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1448 if (!dev) 1449 return -ENOMEM; 1450 per_cpu(acpi_cpuidle_device, pr->id) = dev; 1451 1452 acpi_processor_setup_cpuidle_dev(pr, dev); 1453 1454 /* Register per-cpu cpuidle_device. Cpuidle driver 1455 * must already be registered before registering device 1456 */ 1457 retval = cpuidle_register_device(dev); 1458 if (retval) { 1459 if (acpi_processor_registered == 0) 1460 cpuidle_unregister_driver(&acpi_idle_driver); 1461 return retval; 1462 } 1463 acpi_processor_registered++; 1464 } 1465 return 0; 1466 } 1467 1468 int acpi_processor_power_exit(struct acpi_processor *pr) 1469 { 1470 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); 1471 1472 if (disabled_by_idle_boot_param()) 1473 return 0; 1474 1475 if (pr->flags.power) { 1476 cpuidle_unregister_device(dev); 1477 acpi_processor_registered--; 1478 if (acpi_processor_registered == 0) 1479 cpuidle_unregister_driver(&acpi_idle_driver); 1480 } 1481 1482 pr->flags.power_setup_done = 0; 1483 return 0; 1484 } 1485