1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * processor_idle - idle state submodule to the ACPI processor driver 4 * 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 8 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 9 * - Added processor hotplug support 10 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 11 * - Added support for C3 on SMP 12 */ 13 #define pr_fmt(fmt) "ACPI: " fmt 14 15 #include <linux/module.h> 16 #include <linux/acpi.h> 17 #include <linux/dmi.h> 18 #include <linux/sched.h> /* need_resched() */ 19 #include <linux/tick.h> 20 #include <linux/cpuidle.h> 21 #include <linux/cpu.h> 22 #include <acpi/processor.h> 23 24 /* 25 * Include the apic definitions for x86 to have the APIC timer related defines 26 * available also for UP (on SMP it gets magically included via linux/smp.h). 27 * asm/acpi.h is not an option, as it would require more include magic. Also 28 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 29 */ 30 #ifdef CONFIG_X86 31 #include <asm/apic.h> 32 #include <asm/cpu.h> 33 #endif 34 35 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 36 ACPI_MODULE_NAME("processor_idle"); 37 38 #define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0) 39 40 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 41 module_param(max_cstate, uint, 0000); 42 static unsigned int nocst __read_mostly; 43 module_param(nocst, uint, 0000); 44 static int bm_check_disable __read_mostly; 45 module_param(bm_check_disable, uint, 0000); 46 47 static unsigned int latency_factor __read_mostly = 2; 48 module_param(latency_factor, uint, 0644); 49 50 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 51 52 struct cpuidle_driver acpi_idle_driver = { 53 .name = "acpi_idle", 54 .owner = THIS_MODULE, 55 }; 56 57 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE 58 static 59 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate); 60 61 static int disabled_by_idle_boot_param(void) 62 { 63 return boot_option_idle_override == IDLE_POLL || 64 boot_option_idle_override == IDLE_HALT; 65 } 66 67 /* 68 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 69 * For now disable this. Probably a bug somewhere else. 70 * 71 * To skip this limit, boot/load with a large max_cstate limit. 72 */ 73 static int set_max_cstate(const struct dmi_system_id *id) 74 { 75 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 76 return 0; 77 78 pr_notice("%s detected - limiting to C%ld max_cstate." 79 " Override with \"processor.max_cstate=%d\"\n", id->ident, 80 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 81 82 max_cstate = (long)id->driver_data; 83 84 return 0; 85 } 86 87 static const struct dmi_system_id processor_power_dmi_table[] = { 88 { set_max_cstate, "Clevo 5600D", { 89 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 90 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 91 (void *)2}, 92 { set_max_cstate, "Pavilion zv5000", { 93 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 94 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, 95 (void *)1}, 96 { set_max_cstate, "Asus L8400B", { 97 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 98 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, 99 (void *)1}, 100 {}, 101 }; 102 103 104 /* 105 * Callers should disable interrupts before the call and enable 106 * interrupts after return. 107 */ 108 static void __cpuidle acpi_safe_halt(void) 109 { 110 if (!tif_need_resched()) { 111 safe_halt(); 112 local_irq_disable(); 113 } 114 } 115 116 #ifdef ARCH_APICTIMER_STOPS_ON_C3 117 118 /* 119 * Some BIOS implementations switch to C3 in the published C2 state. 120 * This seems to be a common problem on AMD boxen, but other vendors 121 * are affected too. We pick the most conservative approach: we assume 122 * that the local APIC stops in both C2 and C3. 123 */ 124 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 125 struct acpi_processor_cx *cx) 126 { 127 struct acpi_processor_power *pwr = &pr->power; 128 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; 129 130 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) 131 return; 132 133 if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) 134 type = ACPI_STATE_C1; 135 136 /* 137 * Check, if one of the previous states already marked the lapic 138 * unstable 139 */ 140 if (pwr->timer_broadcast_on_state < state) 141 return; 142 143 if (cx->type >= type) 144 pr->power.timer_broadcast_on_state = state; 145 } 146 147 static void __lapic_timer_propagate_broadcast(void *arg) 148 { 149 struct acpi_processor *pr = (struct acpi_processor *) arg; 150 151 if (pr->power.timer_broadcast_on_state < INT_MAX) 152 tick_broadcast_enable(); 153 else 154 tick_broadcast_disable(); 155 } 156 157 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) 158 { 159 smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast, 160 (void *)pr, 1); 161 } 162 163 /* Power(C) State timer broadcast control */ 164 static bool lapic_timer_needs_broadcast(struct acpi_processor *pr, 165 struct acpi_processor_cx *cx) 166 { 167 return cx - pr->power.states >= pr->power.timer_broadcast_on_state; 168 } 169 170 #else 171 172 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 173 struct acpi_processor_cx *cstate) { } 174 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } 175 176 static bool lapic_timer_needs_broadcast(struct acpi_processor *pr, 177 struct acpi_processor_cx *cx) 178 { 179 return false; 180 } 181 182 #endif 183 184 #if defined(CONFIG_X86) 185 static void tsc_check_state(int state) 186 { 187 switch (boot_cpu_data.x86_vendor) { 188 case X86_VENDOR_HYGON: 189 case X86_VENDOR_AMD: 190 case X86_VENDOR_INTEL: 191 case X86_VENDOR_CENTAUR: 192 case X86_VENDOR_ZHAOXIN: 193 /* 194 * AMD Fam10h TSC will tick in all 195 * C/P/S0/S1 states when this bit is set. 196 */ 197 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 198 return; 199 fallthrough; 200 default: 201 /* TSC could halt in idle, so notify users */ 202 if (state > ACPI_STATE_C1) 203 mark_tsc_unstable("TSC halts in idle"); 204 } 205 } 206 #else 207 static void tsc_check_state(int state) { return; } 208 #endif 209 210 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 211 { 212 213 if (!pr->pblk) 214 return -ENODEV; 215 216 /* if info is obtained from pblk/fadt, type equals state */ 217 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 218 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 219 220 #ifndef CONFIG_HOTPLUG_CPU 221 /* 222 * Check for P_LVL2_UP flag before entering C2 and above on 223 * an SMP system. 224 */ 225 if ((num_online_cpus() > 1) && 226 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 227 return -ENODEV; 228 #endif 229 230 /* determine C2 and C3 address from pblk */ 231 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 232 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 233 234 /* determine latencies from FADT */ 235 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency; 236 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency; 237 238 /* 239 * FADT specified C2 latency must be less than or equal to 240 * 100 microseconds. 241 */ 242 if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 243 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 244 "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency)); 245 /* invalidate C2 */ 246 pr->power.states[ACPI_STATE_C2].address = 0; 247 } 248 249 /* 250 * FADT supplied C3 latency must be less than or equal to 251 * 1000 microseconds. 252 */ 253 if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 254 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 255 "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency)); 256 /* invalidate C3 */ 257 pr->power.states[ACPI_STATE_C3].address = 0; 258 } 259 260 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 261 "lvl2[0x%08x] lvl3[0x%08x]\n", 262 pr->power.states[ACPI_STATE_C2].address, 263 pr->power.states[ACPI_STATE_C3].address)); 264 265 snprintf(pr->power.states[ACPI_STATE_C2].desc, 266 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x", 267 pr->power.states[ACPI_STATE_C2].address); 268 snprintf(pr->power.states[ACPI_STATE_C3].desc, 269 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", 270 pr->power.states[ACPI_STATE_C3].address); 271 272 return 0; 273 } 274 275 static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 276 { 277 if (!pr->power.states[ACPI_STATE_C1].valid) { 278 /* set the first C-State to C1 */ 279 /* all processors need to support C1 */ 280 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 281 pr->power.states[ACPI_STATE_C1].valid = 1; 282 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; 283 284 snprintf(pr->power.states[ACPI_STATE_C1].desc, 285 ACPI_CX_DESC_LEN, "ACPI HLT"); 286 } 287 /* the C0 state only exists as a filler in our array */ 288 pr->power.states[ACPI_STATE_C0].valid = 1; 289 return 0; 290 } 291 292 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 293 { 294 int ret; 295 296 if (nocst) 297 return -ENODEV; 298 299 ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); 300 if (ret) 301 return ret; 302 303 if (!pr->power.count) 304 return -EFAULT; 305 306 pr->flags.has_cst = 1; 307 return 0; 308 } 309 310 static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 311 struct acpi_processor_cx *cx) 312 { 313 static int bm_check_flag = -1; 314 static int bm_control_flag = -1; 315 316 317 if (!cx->address) 318 return; 319 320 /* 321 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 322 * DMA transfers are used by any ISA device to avoid livelock. 323 * Note that we could disable Type-F DMA (as recommended by 324 * the erratum), but this is known to disrupt certain ISA 325 * devices thus we take the conservative approach. 326 */ 327 else if (errata.piix4.fdma) { 328 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 329 "C3 not supported on PIIX4 with Type-F DMA\n")); 330 return; 331 } 332 333 /* All the logic here assumes flags.bm_check is same across all CPUs */ 334 if (bm_check_flag == -1) { 335 /* Determine whether bm_check is needed based on CPU */ 336 acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 337 bm_check_flag = pr->flags.bm_check; 338 bm_control_flag = pr->flags.bm_control; 339 } else { 340 pr->flags.bm_check = bm_check_flag; 341 pr->flags.bm_control = bm_control_flag; 342 } 343 344 if (pr->flags.bm_check) { 345 if (!pr->flags.bm_control) { 346 if (pr->flags.has_cst != 1) { 347 /* bus mastering control is necessary */ 348 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 349 "C3 support requires BM control\n")); 350 return; 351 } else { 352 /* Here we enter C3 without bus mastering */ 353 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 354 "C3 support without BM control\n")); 355 } 356 } 357 } else { 358 /* 359 * WBINVD should be set in fadt, for C3 state to be 360 * supported on when bm_check is not required. 361 */ 362 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { 363 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 364 "Cache invalidation should work properly" 365 " for C3 to be enabled on SMP systems\n")); 366 return; 367 } 368 } 369 370 /* 371 * Otherwise we've met all of our C3 requirements. 372 * Normalize the C3 latency to expidite policy. Enable 373 * checking of bus mastering status (bm_check) so we can 374 * use this in our C3 policy 375 */ 376 cx->valid = 1; 377 378 /* 379 * On older chipsets, BM_RLD needs to be set 380 * in order for Bus Master activity to wake the 381 * system from C3. Newer chipsets handle DMA 382 * during C3 automatically and BM_RLD is a NOP. 383 * In either case, the proper way to 384 * handle BM_RLD is to set it and leave it set. 385 */ 386 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 387 388 return; 389 } 390 391 static int acpi_processor_power_verify(struct acpi_processor *pr) 392 { 393 unsigned int i; 394 unsigned int working = 0; 395 396 pr->power.timer_broadcast_on_state = INT_MAX; 397 398 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 399 struct acpi_processor_cx *cx = &pr->power.states[i]; 400 401 switch (cx->type) { 402 case ACPI_STATE_C1: 403 cx->valid = 1; 404 break; 405 406 case ACPI_STATE_C2: 407 if (!cx->address) 408 break; 409 cx->valid = 1; 410 break; 411 412 case ACPI_STATE_C3: 413 acpi_processor_power_verify_c3(pr, cx); 414 break; 415 } 416 if (!cx->valid) 417 continue; 418 419 lapic_timer_check_state(i, pr, cx); 420 tsc_check_state(cx->type); 421 working++; 422 } 423 424 lapic_timer_propagate_broadcast(pr); 425 426 return (working); 427 } 428 429 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 430 { 431 unsigned int i; 432 int result; 433 434 435 /* NOTE: the idle thread may not be running while calling 436 * this function */ 437 438 /* Zero initialize all the C-states info. */ 439 memset(pr->power.states, 0, sizeof(pr->power.states)); 440 441 result = acpi_processor_get_power_info_cst(pr); 442 if (result == -ENODEV) 443 result = acpi_processor_get_power_info_fadt(pr); 444 445 if (result) 446 return result; 447 448 acpi_processor_get_power_info_default(pr); 449 450 pr->power.count = acpi_processor_power_verify(pr); 451 452 /* 453 * if one state of type C2 or C3 is available, mark this 454 * CPU as being "idle manageable" 455 */ 456 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 457 if (pr->power.states[i].valid) { 458 pr->power.count = i; 459 pr->flags.power = 1; 460 } 461 } 462 463 return 0; 464 } 465 466 /** 467 * acpi_idle_bm_check - checks if bus master activity was detected 468 */ 469 static int acpi_idle_bm_check(void) 470 { 471 u32 bm_status = 0; 472 473 if (bm_check_disable) 474 return 0; 475 476 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 477 if (bm_status) 478 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 479 /* 480 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 481 * the true state of bus mastering activity; forcing us to 482 * manually check the BMIDEA bit of each IDE channel. 483 */ 484 else if (errata.piix4.bmisx) { 485 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 486 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 487 bm_status = 1; 488 } 489 return bm_status; 490 } 491 492 static void wait_for_freeze(void) 493 { 494 #ifdef CONFIG_X86 495 /* No delay is needed if we are in guest */ 496 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 497 return; 498 #endif 499 /* Dummy wait op - must do something useless after P_LVL2 read 500 because chipsets cannot guarantee that STPCLK# signal 501 gets asserted in time to freeze execution properly. */ 502 inl(acpi_gbl_FADT.xpm_timer_block.address); 503 } 504 505 /** 506 * acpi_idle_do_entry - enter idle state using the appropriate method 507 * @cx: cstate data 508 * 509 * Caller disables interrupt before call and enables interrupt after return. 510 */ 511 static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) 512 { 513 if (cx->entry_method == ACPI_CSTATE_FFH) { 514 /* Call into architectural FFH based C-state */ 515 acpi_processor_ffh_cstate_enter(cx); 516 } else if (cx->entry_method == ACPI_CSTATE_HALT) { 517 acpi_safe_halt(); 518 } else { 519 /* IO port based C-state */ 520 inb(cx->address); 521 wait_for_freeze(); 522 } 523 } 524 525 /** 526 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) 527 * @dev: the target CPU 528 * @index: the index of suggested state 529 */ 530 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) 531 { 532 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 533 534 ACPI_FLUSH_CPU_CACHE(); 535 536 while (1) { 537 538 if (cx->entry_method == ACPI_CSTATE_HALT) 539 safe_halt(); 540 else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { 541 inb(cx->address); 542 wait_for_freeze(); 543 } else 544 return -ENODEV; 545 546 #if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU) 547 /* If NMI wants to wake up CPU0, start CPU0. */ 548 if (wakeup_cpu0()) 549 start_cpu0(); 550 #endif 551 } 552 553 /* Never reached */ 554 return 0; 555 } 556 557 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) 558 { 559 return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst && 560 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED); 561 } 562 563 static int c3_cpu_count; 564 static DEFINE_RAW_SPINLOCK(c3_lock); 565 566 /** 567 * acpi_idle_enter_bm - enters C3 with proper BM handling 568 * @drv: cpuidle driver 569 * @pr: Target processor 570 * @cx: Target state context 571 * @index: index of target state 572 */ 573 static int acpi_idle_enter_bm(struct cpuidle_driver *drv, 574 struct acpi_processor *pr, 575 struct acpi_processor_cx *cx, 576 int index) 577 { 578 static struct acpi_processor_cx safe_cx = { 579 .entry_method = ACPI_CSTATE_HALT, 580 }; 581 582 /* 583 * disable bus master 584 * bm_check implies we need ARB_DIS 585 * bm_control implies whether we can do ARB_DIS 586 * 587 * That leaves a case where bm_check is set and bm_control is not set. 588 * In that case we cannot do much, we enter C3 without doing anything. 589 */ 590 bool dis_bm = pr->flags.bm_control; 591 592 /* If we can skip BM, demote to a safe state. */ 593 if (!cx->bm_sts_skip && acpi_idle_bm_check()) { 594 dis_bm = false; 595 index = drv->safe_state_index; 596 if (index >= 0) { 597 cx = this_cpu_read(acpi_cstate[index]); 598 } else { 599 cx = &safe_cx; 600 index = -EBUSY; 601 } 602 } 603 604 if (dis_bm) { 605 raw_spin_lock(&c3_lock); 606 c3_cpu_count++; 607 /* Disable bus master arbitration when all CPUs are in C3 */ 608 if (c3_cpu_count == num_online_cpus()) 609 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); 610 raw_spin_unlock(&c3_lock); 611 } 612 613 rcu_idle_enter(); 614 615 acpi_idle_do_entry(cx); 616 617 rcu_idle_exit(); 618 619 /* Re-enable bus master arbitration */ 620 if (dis_bm) { 621 raw_spin_lock(&c3_lock); 622 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); 623 c3_cpu_count--; 624 raw_spin_unlock(&c3_lock); 625 } 626 627 return index; 628 } 629 630 static int acpi_idle_enter(struct cpuidle_device *dev, 631 struct cpuidle_driver *drv, int index) 632 { 633 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 634 struct acpi_processor *pr; 635 636 pr = __this_cpu_read(processors); 637 if (unlikely(!pr)) 638 return -EINVAL; 639 640 if (cx->type != ACPI_STATE_C1) { 641 if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) 642 return acpi_idle_enter_bm(drv, pr, cx, index); 643 644 /* C2 to C1 demotion. */ 645 if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { 646 index = ACPI_IDLE_STATE_START; 647 cx = per_cpu(acpi_cstate[index], dev->cpu); 648 } 649 } 650 651 if (cx->type == ACPI_STATE_C3) 652 ACPI_FLUSH_CPU_CACHE(); 653 654 acpi_idle_do_entry(cx); 655 656 return index; 657 } 658 659 static int acpi_idle_enter_s2idle(struct cpuidle_device *dev, 660 struct cpuidle_driver *drv, int index) 661 { 662 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 663 664 if (cx->type == ACPI_STATE_C3) { 665 struct acpi_processor *pr = __this_cpu_read(processors); 666 667 if (unlikely(!pr)) 668 return 0; 669 670 if (pr->flags.bm_check) { 671 u8 bm_sts_skip = cx->bm_sts_skip; 672 673 /* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */ 674 cx->bm_sts_skip = 1; 675 acpi_idle_enter_bm(drv, pr, cx, index); 676 cx->bm_sts_skip = bm_sts_skip; 677 678 return 0; 679 } else { 680 ACPI_FLUSH_CPU_CACHE(); 681 } 682 } 683 acpi_idle_do_entry(cx); 684 685 return 0; 686 } 687 688 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 689 struct cpuidle_device *dev) 690 { 691 int i, count = ACPI_IDLE_STATE_START; 692 struct acpi_processor_cx *cx; 693 struct cpuidle_state *state; 694 695 if (max_cstate == 0) 696 max_cstate = 1; 697 698 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 699 state = &acpi_idle_driver.states[count]; 700 cx = &pr->power.states[i]; 701 702 if (!cx->valid) 703 continue; 704 705 per_cpu(acpi_cstate[count], dev->cpu) = cx; 706 707 if (lapic_timer_needs_broadcast(pr, cx)) 708 state->flags |= CPUIDLE_FLAG_TIMER_STOP; 709 710 if (cx->type == ACPI_STATE_C3) { 711 state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; 712 if (pr->flags.bm_check) 713 state->flags |= CPUIDLE_FLAG_RCU_IDLE; 714 } 715 716 count++; 717 if (count == CPUIDLE_STATE_MAX) 718 break; 719 } 720 721 if (!count) 722 return -EINVAL; 723 724 return 0; 725 } 726 727 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 728 { 729 int i, count; 730 struct acpi_processor_cx *cx; 731 struct cpuidle_state *state; 732 struct cpuidle_driver *drv = &acpi_idle_driver; 733 734 if (max_cstate == 0) 735 max_cstate = 1; 736 737 if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) { 738 cpuidle_poll_state_init(drv); 739 count = 1; 740 } else { 741 count = 0; 742 } 743 744 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 745 cx = &pr->power.states[i]; 746 747 if (!cx->valid) 748 continue; 749 750 state = &drv->states[count]; 751 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 752 strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 753 state->exit_latency = cx->latency; 754 state->target_residency = cx->latency * latency_factor; 755 state->enter = acpi_idle_enter; 756 757 state->flags = 0; 758 if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) { 759 state->enter_dead = acpi_idle_play_dead; 760 drv->safe_state_index = count; 761 } 762 /* 763 * Halt-induced C1 is not good for ->enter_s2idle, because it 764 * re-enables interrupts on exit. Moreover, C1 is generally not 765 * particularly interesting from the suspend-to-idle angle, so 766 * avoid C1 and the situations in which we may need to fall back 767 * to it altogether. 768 */ 769 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) 770 state->enter_s2idle = acpi_idle_enter_s2idle; 771 772 count++; 773 if (count == CPUIDLE_STATE_MAX) 774 break; 775 } 776 777 drv->state_count = count; 778 779 if (!count) 780 return -EINVAL; 781 782 return 0; 783 } 784 785 static inline void acpi_processor_cstate_first_run_checks(void) 786 { 787 static int first_run; 788 789 if (first_run) 790 return; 791 dmi_check_system(processor_power_dmi_table); 792 max_cstate = acpi_processor_cstate_check(max_cstate); 793 if (max_cstate < ACPI_C_STATES_MAX) 794 pr_notice("ACPI: processor limited to max C-state %d\n", 795 max_cstate); 796 first_run++; 797 798 if (nocst) 799 return; 800 801 acpi_processor_claim_cst_control(); 802 } 803 #else 804 805 static inline int disabled_by_idle_boot_param(void) { return 0; } 806 static inline void acpi_processor_cstate_first_run_checks(void) { } 807 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 808 { 809 return -ENODEV; 810 } 811 812 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 813 struct cpuidle_device *dev) 814 { 815 return -EINVAL; 816 } 817 818 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 819 { 820 return -EINVAL; 821 } 822 823 #endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ 824 825 struct acpi_lpi_states_array { 826 unsigned int size; 827 unsigned int composite_states_size; 828 struct acpi_lpi_state *entries; 829 struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER]; 830 }; 831 832 static int obj_get_integer(union acpi_object *obj, u32 *value) 833 { 834 if (obj->type != ACPI_TYPE_INTEGER) 835 return -EINVAL; 836 837 *value = obj->integer.value; 838 return 0; 839 } 840 841 static int acpi_processor_evaluate_lpi(acpi_handle handle, 842 struct acpi_lpi_states_array *info) 843 { 844 acpi_status status; 845 int ret = 0; 846 int pkg_count, state_idx = 1, loop; 847 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 848 union acpi_object *lpi_data; 849 struct acpi_lpi_state *lpi_state; 850 851 status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer); 852 if (ACPI_FAILURE(status)) { 853 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n")); 854 return -ENODEV; 855 } 856 857 lpi_data = buffer.pointer; 858 859 /* There must be at least 4 elements = 3 elements + 1 package */ 860 if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE || 861 lpi_data->package.count < 4) { 862 pr_debug("not enough elements in _LPI\n"); 863 ret = -ENODATA; 864 goto end; 865 } 866 867 pkg_count = lpi_data->package.elements[2].integer.value; 868 869 /* Validate number of power states. */ 870 if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) { 871 pr_debug("count given by _LPI is not valid\n"); 872 ret = -ENODATA; 873 goto end; 874 } 875 876 lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL); 877 if (!lpi_state) { 878 ret = -ENOMEM; 879 goto end; 880 } 881 882 info->size = pkg_count; 883 info->entries = lpi_state; 884 885 /* LPI States start at index 3 */ 886 for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) { 887 union acpi_object *element, *pkg_elem, *obj; 888 889 element = &lpi_data->package.elements[loop]; 890 if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7) 891 continue; 892 893 pkg_elem = element->package.elements; 894 895 obj = pkg_elem + 6; 896 if (obj->type == ACPI_TYPE_BUFFER) { 897 struct acpi_power_register *reg; 898 899 reg = (struct acpi_power_register *)obj->buffer.pointer; 900 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 901 reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) 902 continue; 903 904 lpi_state->address = reg->address; 905 lpi_state->entry_method = 906 reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ? 907 ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO; 908 } else if (obj->type == ACPI_TYPE_INTEGER) { 909 lpi_state->entry_method = ACPI_CSTATE_INTEGER; 910 lpi_state->address = obj->integer.value; 911 } else { 912 continue; 913 } 914 915 /* elements[7,8] skipped for now i.e. Residency/Usage counter*/ 916 917 obj = pkg_elem + 9; 918 if (obj->type == ACPI_TYPE_STRING) 919 strlcpy(lpi_state->desc, obj->string.pointer, 920 ACPI_CX_DESC_LEN); 921 922 lpi_state->index = state_idx; 923 if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) { 924 pr_debug("No min. residency found, assuming 10 us\n"); 925 lpi_state->min_residency = 10; 926 } 927 928 if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) { 929 pr_debug("No wakeup residency found, assuming 10 us\n"); 930 lpi_state->wake_latency = 10; 931 } 932 933 if (obj_get_integer(pkg_elem + 2, &lpi_state->flags)) 934 lpi_state->flags = 0; 935 936 if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags)) 937 lpi_state->arch_flags = 0; 938 939 if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq)) 940 lpi_state->res_cnt_freq = 1; 941 942 if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state)) 943 lpi_state->enable_parent_state = 0; 944 } 945 946 acpi_handle_debug(handle, "Found %d power states\n", state_idx); 947 end: 948 kfree(buffer.pointer); 949 return ret; 950 } 951 952 /* 953 * flat_state_cnt - the number of composite LPI states after the process of flattening 954 */ 955 static int flat_state_cnt; 956 957 /** 958 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state 959 * 960 * @local: local LPI state 961 * @parent: parent LPI state 962 * @result: composite LPI state 963 */ 964 static bool combine_lpi_states(struct acpi_lpi_state *local, 965 struct acpi_lpi_state *parent, 966 struct acpi_lpi_state *result) 967 { 968 if (parent->entry_method == ACPI_CSTATE_INTEGER) { 969 if (!parent->address) /* 0 means autopromotable */ 970 return false; 971 result->address = local->address + parent->address; 972 } else { 973 result->address = parent->address; 974 } 975 976 result->min_residency = max(local->min_residency, parent->min_residency); 977 result->wake_latency = local->wake_latency + parent->wake_latency; 978 result->enable_parent_state = parent->enable_parent_state; 979 result->entry_method = local->entry_method; 980 981 result->flags = parent->flags; 982 result->arch_flags = parent->arch_flags; 983 result->index = parent->index; 984 985 strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN); 986 strlcat(result->desc, "+", ACPI_CX_DESC_LEN); 987 strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); 988 return true; 989 } 990 991 #define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0) 992 993 static void stash_composite_state(struct acpi_lpi_states_array *curr_level, 994 struct acpi_lpi_state *t) 995 { 996 curr_level->composite_states[curr_level->composite_states_size++] = t; 997 } 998 999 static int flatten_lpi_states(struct acpi_processor *pr, 1000 struct acpi_lpi_states_array *curr_level, 1001 struct acpi_lpi_states_array *prev_level) 1002 { 1003 int i, j, state_count = curr_level->size; 1004 struct acpi_lpi_state *p, *t = curr_level->entries; 1005 1006 curr_level->composite_states_size = 0; 1007 for (j = 0; j < state_count; j++, t++) { 1008 struct acpi_lpi_state *flpi; 1009 1010 if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED)) 1011 continue; 1012 1013 if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) { 1014 pr_warn("Limiting number of LPI states to max (%d)\n", 1015 ACPI_PROCESSOR_MAX_POWER); 1016 pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 1017 break; 1018 } 1019 1020 flpi = &pr->power.lpi_states[flat_state_cnt]; 1021 1022 if (!prev_level) { /* leaf/processor node */ 1023 memcpy(flpi, t, sizeof(*t)); 1024 stash_composite_state(curr_level, flpi); 1025 flat_state_cnt++; 1026 continue; 1027 } 1028 1029 for (i = 0; i < prev_level->composite_states_size; i++) { 1030 p = prev_level->composite_states[i]; 1031 if (t->index <= p->enable_parent_state && 1032 combine_lpi_states(p, t, flpi)) { 1033 stash_composite_state(curr_level, flpi); 1034 flat_state_cnt++; 1035 flpi++; 1036 } 1037 } 1038 } 1039 1040 kfree(curr_level->entries); 1041 return 0; 1042 } 1043 1044 static int acpi_processor_get_lpi_info(struct acpi_processor *pr) 1045 { 1046 int ret, i; 1047 acpi_status status; 1048 acpi_handle handle = pr->handle, pr_ahandle; 1049 struct acpi_device *d = NULL; 1050 struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; 1051 1052 if (!osc_pc_lpi_support_confirmed) 1053 return -EOPNOTSUPP; 1054 1055 if (!acpi_has_method(handle, "_LPI")) 1056 return -EINVAL; 1057 1058 flat_state_cnt = 0; 1059 prev = &info[0]; 1060 curr = &info[1]; 1061 handle = pr->handle; 1062 ret = acpi_processor_evaluate_lpi(handle, prev); 1063 if (ret) 1064 return ret; 1065 flatten_lpi_states(pr, prev, NULL); 1066 1067 status = acpi_get_parent(handle, &pr_ahandle); 1068 while (ACPI_SUCCESS(status)) { 1069 acpi_bus_get_device(pr_ahandle, &d); 1070 handle = pr_ahandle; 1071 1072 if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID)) 1073 break; 1074 1075 /* can be optional ? */ 1076 if (!acpi_has_method(handle, "_LPI")) 1077 break; 1078 1079 ret = acpi_processor_evaluate_lpi(handle, curr); 1080 if (ret) 1081 break; 1082 1083 /* flatten all the LPI states in this level of hierarchy */ 1084 flatten_lpi_states(pr, curr, prev); 1085 1086 tmp = prev, prev = curr, curr = tmp; 1087 1088 status = acpi_get_parent(handle, &pr_ahandle); 1089 } 1090 1091 pr->power.count = flat_state_cnt; 1092 /* reset the index after flattening */ 1093 for (i = 0; i < pr->power.count; i++) 1094 pr->power.lpi_states[i].index = i; 1095 1096 /* Tell driver that _LPI is supported. */ 1097 pr->flags.has_lpi = 1; 1098 pr->flags.power = 1; 1099 1100 return 0; 1101 } 1102 1103 int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) 1104 { 1105 return -ENODEV; 1106 } 1107 1108 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) 1109 { 1110 return -ENODEV; 1111 } 1112 1113 /** 1114 * acpi_idle_lpi_enter - enters an ACPI any LPI state 1115 * @dev: the target CPU 1116 * @drv: cpuidle driver containing cpuidle state info 1117 * @index: index of target state 1118 * 1119 * Return: 0 for success or negative value for error 1120 */ 1121 static int acpi_idle_lpi_enter(struct cpuidle_device *dev, 1122 struct cpuidle_driver *drv, int index) 1123 { 1124 struct acpi_processor *pr; 1125 struct acpi_lpi_state *lpi; 1126 1127 pr = __this_cpu_read(processors); 1128 1129 if (unlikely(!pr)) 1130 return -EINVAL; 1131 1132 lpi = &pr->power.lpi_states[index]; 1133 if (lpi->entry_method == ACPI_CSTATE_FFH) 1134 return acpi_processor_ffh_lpi_enter(lpi); 1135 1136 return -EINVAL; 1137 } 1138 1139 static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) 1140 { 1141 int i; 1142 struct acpi_lpi_state *lpi; 1143 struct cpuidle_state *state; 1144 struct cpuidle_driver *drv = &acpi_idle_driver; 1145 1146 if (!pr->flags.has_lpi) 1147 return -EOPNOTSUPP; 1148 1149 for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) { 1150 lpi = &pr->power.lpi_states[i]; 1151 1152 state = &drv->states[i]; 1153 snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); 1154 strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); 1155 state->exit_latency = lpi->wake_latency; 1156 state->target_residency = lpi->min_residency; 1157 if (lpi->arch_flags) 1158 state->flags |= CPUIDLE_FLAG_TIMER_STOP; 1159 state->enter = acpi_idle_lpi_enter; 1160 drv->safe_state_index = i; 1161 } 1162 1163 drv->state_count = i; 1164 1165 return 0; 1166 } 1167 1168 /** 1169 * acpi_processor_setup_cpuidle_states- prepares and configures cpuidle 1170 * global state data i.e. idle routines 1171 * 1172 * @pr: the ACPI processor 1173 */ 1174 static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) 1175 { 1176 int i; 1177 struct cpuidle_driver *drv = &acpi_idle_driver; 1178 1179 if (!pr->flags.power_setup_done || !pr->flags.power) 1180 return -EINVAL; 1181 1182 drv->safe_state_index = -1; 1183 for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) { 1184 drv->states[i].name[0] = '\0'; 1185 drv->states[i].desc[0] = '\0'; 1186 } 1187 1188 if (pr->flags.has_lpi) 1189 return acpi_processor_setup_lpi_states(pr); 1190 1191 return acpi_processor_setup_cstates(pr); 1192 } 1193 1194 /** 1195 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE 1196 * device i.e. per-cpu data 1197 * 1198 * @pr: the ACPI processor 1199 * @dev : the cpuidle device 1200 */ 1201 static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr, 1202 struct cpuidle_device *dev) 1203 { 1204 if (!pr->flags.power_setup_done || !pr->flags.power || !dev) 1205 return -EINVAL; 1206 1207 dev->cpu = pr->id; 1208 if (pr->flags.has_lpi) 1209 return acpi_processor_ffh_lpi_probe(pr->id); 1210 1211 return acpi_processor_setup_cpuidle_cx(pr, dev); 1212 } 1213 1214 static int acpi_processor_get_power_info(struct acpi_processor *pr) 1215 { 1216 int ret; 1217 1218 ret = acpi_processor_get_lpi_info(pr); 1219 if (ret) 1220 ret = acpi_processor_get_cstate_info(pr); 1221 1222 return ret; 1223 } 1224 1225 int acpi_processor_hotplug(struct acpi_processor *pr) 1226 { 1227 int ret = 0; 1228 struct cpuidle_device *dev; 1229 1230 if (disabled_by_idle_boot_param()) 1231 return 0; 1232 1233 if (!pr->flags.power_setup_done) 1234 return -ENODEV; 1235 1236 dev = per_cpu(acpi_cpuidle_device, pr->id); 1237 cpuidle_pause_and_lock(); 1238 cpuidle_disable_device(dev); 1239 ret = acpi_processor_get_power_info(pr); 1240 if (!ret && pr->flags.power) { 1241 acpi_processor_setup_cpuidle_dev(pr, dev); 1242 ret = cpuidle_enable_device(dev); 1243 } 1244 cpuidle_resume_and_unlock(); 1245 1246 return ret; 1247 } 1248 1249 int acpi_processor_power_state_has_changed(struct acpi_processor *pr) 1250 { 1251 int cpu; 1252 struct acpi_processor *_pr; 1253 struct cpuidle_device *dev; 1254 1255 if (disabled_by_idle_boot_param()) 1256 return 0; 1257 1258 if (!pr->flags.power_setup_done) 1259 return -ENODEV; 1260 1261 /* 1262 * FIXME: Design the ACPI notification to make it once per 1263 * system instead of once per-cpu. This condition is a hack 1264 * to make the code that updates C-States be called once. 1265 */ 1266 1267 if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { 1268 1269 /* Protect against cpu-hotplug */ 1270 get_online_cpus(); 1271 cpuidle_pause_and_lock(); 1272 1273 /* Disable all cpuidle devices */ 1274 for_each_online_cpu(cpu) { 1275 _pr = per_cpu(processors, cpu); 1276 if (!_pr || !_pr->flags.power_setup_done) 1277 continue; 1278 dev = per_cpu(acpi_cpuidle_device, cpu); 1279 cpuidle_disable_device(dev); 1280 } 1281 1282 /* Populate Updated C-state information */ 1283 acpi_processor_get_power_info(pr); 1284 acpi_processor_setup_cpuidle_states(pr); 1285 1286 /* Enable all cpuidle devices */ 1287 for_each_online_cpu(cpu) { 1288 _pr = per_cpu(processors, cpu); 1289 if (!_pr || !_pr->flags.power_setup_done) 1290 continue; 1291 acpi_processor_get_power_info(_pr); 1292 if (_pr->flags.power) { 1293 dev = per_cpu(acpi_cpuidle_device, cpu); 1294 acpi_processor_setup_cpuidle_dev(_pr, dev); 1295 cpuidle_enable_device(dev); 1296 } 1297 } 1298 cpuidle_resume_and_unlock(); 1299 put_online_cpus(); 1300 } 1301 1302 return 0; 1303 } 1304 1305 static int acpi_processor_registered; 1306 1307 int acpi_processor_power_init(struct acpi_processor *pr) 1308 { 1309 int retval; 1310 struct cpuidle_device *dev; 1311 1312 if (disabled_by_idle_boot_param()) 1313 return 0; 1314 1315 acpi_processor_cstate_first_run_checks(); 1316 1317 if (!acpi_processor_get_power_info(pr)) 1318 pr->flags.power_setup_done = 1; 1319 1320 /* 1321 * Install the idle handler if processor power management is supported. 1322 * Note that we use previously set idle handler will be used on 1323 * platforms that only support C1. 1324 */ 1325 if (pr->flags.power) { 1326 /* Register acpi_idle_driver if not already registered */ 1327 if (!acpi_processor_registered) { 1328 acpi_processor_setup_cpuidle_states(pr); 1329 retval = cpuidle_register_driver(&acpi_idle_driver); 1330 if (retval) 1331 return retval; 1332 pr_debug("%s registered with cpuidle\n", 1333 acpi_idle_driver.name); 1334 } 1335 1336 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1337 if (!dev) 1338 return -ENOMEM; 1339 per_cpu(acpi_cpuidle_device, pr->id) = dev; 1340 1341 acpi_processor_setup_cpuidle_dev(pr, dev); 1342 1343 /* Register per-cpu cpuidle_device. Cpuidle driver 1344 * must already be registered before registering device 1345 */ 1346 retval = cpuidle_register_device(dev); 1347 if (retval) { 1348 if (acpi_processor_registered == 0) 1349 cpuidle_unregister_driver(&acpi_idle_driver); 1350 return retval; 1351 } 1352 acpi_processor_registered++; 1353 } 1354 return 0; 1355 } 1356 1357 int acpi_processor_power_exit(struct acpi_processor *pr) 1358 { 1359 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); 1360 1361 if (disabled_by_idle_boot_param()) 1362 return 0; 1363 1364 if (pr->flags.power) { 1365 cpuidle_unregister_device(dev); 1366 acpi_processor_registered--; 1367 if (acpi_processor_registered == 0) 1368 cpuidle_unregister_driver(&acpi_idle_driver); 1369 } 1370 1371 pr->flags.power_setup_done = 0; 1372 return 0; 1373 } 1374