// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
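/*
 * Illustrative usage of the parameters above (an informal note, not a
 * complete reference): this code is built into the "processor" driver,
 * so the parameters can be given on the kernel command line, e.g.
 *
 *	processor.max_cstate=1	   - limit the driver to C1
 *	processor.nocst=1	   - ignore _CST, use only FADT/P_BLK data
 *	processor.latency_factor=4 - require a longer predicted idle period
 *				     (target_residency = latency * factor)
 *				     before a deep C-state is selected
 *
 * bm_check_disable skips the bus-master status check performed before
 * entering C3-type states (see acpi_idle_bm_check() below).
 */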
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
	       boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};


/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *)arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		if (broadcast)
			tick_broadcast_enter();
		else
			tick_broadcast_exit();
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
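/*
 * _CST layout expected by the parser below (summarized from the ACPI spec
 * for orientation only; the per-element checks in the loop are authoritative):
 *
 *	Package {
 *		Count,			// number of C-state sub-packages
 *		Package {		// one per C-state
 *			Register,	// Buffer: I/O port or FFH description
 *			Type,		// Integer: C1, C2 or C3
 *			Latency,	// Integer: worst-case exit latency (us)
 *			Power		// Integer: average power (not used here)
 *		},
 *		...
 *	}
 */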
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status;
	u64 count;
	int current_count;
	int i, ret = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		pr_err("not enough elements in _CST\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		pr_err("count given by _CST is not valid\n");
		ret = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			pr_warn("Limiting number of power states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		ret = -EFAULT;

end:
	kfree(buffer.pointer);

	return ret;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}
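/*
 * Shared bookkeeping for C3 entry with bus-master control (see
 * acpi_idle_enter_bm() below): c3_cpu_count tracks how many CPUs are
 * currently inside C3 so that bus-master arbitration is only disabled
 * once every online CPU has entered C3; c3_lock serializes the updates.
 */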
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 * @timer_bc: Whether or not to change timer mode to broadcast
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx, bool timer_bc)
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 0);
}

static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx, true);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}

static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return;

		if (pr->flags.bm_check) {
			acpi_idle_enter_bm(pr, cx, false);
			return;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit.  Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	acpi_status status;
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);
	first_run++;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
					    acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status))
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
	}
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}
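/*
 * _LPI layout handled by acpi_processor_evaluate_lpi() below (summarized
 * from the ACPI spec; only the fields actually consumed here are listed):
 *
 *	Package {
 *		Revision, LevelID, Count,	// elements 0..2
 *		Package {			// one per LPI state, from element 3 on
 *			MinResidency,		// [0] us
 *			WakeLatency,		// [1] us
 *			Flags,			// [2] bit 0 = enabled
 *			ArchContextLostFlags,	// [3]
 *			ResidencyCounterFreq,	// [4]
 *			EnabledParentState,	// [5]
 *			EntryMethod,		// [6] register Buffer or Integer
 *			ResidencyCounter,	// [7] not used here
 *			UsageCounter,		// [8] not used here
 *			StateName		// [9] String
 *		},
 *		...
 *	}
 */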
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup residency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		acpi_bus_get_device(pr_ahandle, &d);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional ? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -ENODEV;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev : the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/* Register per-cpu cpuidle_device. Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}