1 /* 2 * processor_idle - idle state submodule to the ACPI processor driver 3 * 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 * - Added processor hotplug support 9 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 10 * - Added support for C3 on SMP 11 * 12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or (at 17 * your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, but 20 * WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 22 * General Public License for more details. 23 * 24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 */ 26 #define pr_fmt(fmt) "ACPI: " fmt 27 28 #include <linux/module.h> 29 #include <linux/acpi.h> 30 #include <linux/dmi.h> 31 #include <linux/sched.h> /* need_resched() */ 32 #include <linux/tick.h> 33 #include <linux/cpuidle.h> 34 #include <linux/cpu.h> 35 #include <acpi/processor.h> 36 37 /* 38 * Include the apic definitions for x86 to have the APIC timer related defines 39 * available also for UP (on SMP it gets magically included via linux/smp.h). 40 * asm/acpi.h is not an option, as it would require more include magic. Also 41 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 42 */ 43 #ifdef CONFIG_X86 44 #include <asm/apic.h> 45 #endif 46 47 #define ACPI_PROCESSOR_CLASS "processor" 48 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 49 ACPI_MODULE_NAME("processor_idle"); 50 51 #define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0) 52 53 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 54 module_param(max_cstate, uint, 0000); 55 static unsigned int nocst __read_mostly; 56 module_param(nocst, uint, 0000); 57 static int bm_check_disable __read_mostly; 58 module_param(bm_check_disable, uint, 0000); 59 60 static unsigned int latency_factor __read_mostly = 2; 61 module_param(latency_factor, uint, 0644); 62 63 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 64 65 struct cpuidle_driver acpi_idle_driver = { 66 .name = "acpi_idle", 67 .owner = THIS_MODULE, 68 }; 69 70 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE 71 static 72 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate); 73 74 static int disabled_by_idle_boot_param(void) 75 { 76 return boot_option_idle_override == IDLE_POLL || 77 boot_option_idle_override == IDLE_HALT; 78 } 79 80 /* 81 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 82 * For now disable this. Probably a bug somewhere else. 83 * 84 * To skip this limit, boot/load with a large max_cstate limit. 85 */ 86 static int set_max_cstate(const struct dmi_system_id *id) 87 { 88 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 89 return 0; 90 91 pr_notice("%s detected - limiting to C%ld max_cstate." 92 " Override with \"processor.max_cstate=%d\"\n", id->ident, 93 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 94 95 max_cstate = (long)id->driver_data; 96 97 return 0; 98 } 99 100 static const struct dmi_system_id processor_power_dmi_table[] = { 101 { set_max_cstate, "Clevo 5600D", { 102 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 103 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 104 (void *)2}, 105 { set_max_cstate, "Pavilion zv5000", { 106 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 107 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, 108 (void *)1}, 109 { set_max_cstate, "Asus L8400B", { 110 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 111 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, 112 (void *)1}, 113 {}, 114 }; 115 116 117 /* 118 * Callers should disable interrupts before the call and enable 119 * interrupts after return. 120 */ 121 static void __cpuidle acpi_safe_halt(void) 122 { 123 if (!tif_need_resched()) { 124 safe_halt(); 125 local_irq_disable(); 126 } 127 } 128 129 #ifdef ARCH_APICTIMER_STOPS_ON_C3 130 131 /* 132 * Some BIOS implementations switch to C3 in the published C2 state. 133 * This seems to be a common problem on AMD boxen, but other vendors 134 * are affected too. We pick the most conservative approach: we assume 135 * that the local APIC stops in both C2 and C3. 136 */ 137 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 138 struct acpi_processor_cx *cx) 139 { 140 struct acpi_processor_power *pwr = &pr->power; 141 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; 142 143 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) 144 return; 145 146 if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) 147 type = ACPI_STATE_C1; 148 149 /* 150 * Check, if one of the previous states already marked the lapic 151 * unstable 152 */ 153 if (pwr->timer_broadcast_on_state < state) 154 return; 155 156 if (cx->type >= type) 157 pr->power.timer_broadcast_on_state = state; 158 } 159 160 static void __lapic_timer_propagate_broadcast(void *arg) 161 { 162 struct acpi_processor *pr = (struct acpi_processor *) arg; 163 164 if (pr->power.timer_broadcast_on_state < INT_MAX) 165 tick_broadcast_enable(); 166 else 167 tick_broadcast_disable(); 168 } 169 170 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) 171 { 172 smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast, 173 (void *)pr, 1); 174 } 175 176 /* Power(C) State timer broadcast control */ 177 static void lapic_timer_state_broadcast(struct acpi_processor *pr, 178 struct acpi_processor_cx *cx, 179 int broadcast) 180 { 181 int state = cx - pr->power.states; 182 183 if (state >= pr->power.timer_broadcast_on_state) { 184 if (broadcast) 185 tick_broadcast_enter(); 186 else 187 tick_broadcast_exit(); 188 } 189 } 190 191 #else 192 193 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 194 struct acpi_processor_cx *cstate) { } 195 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } 196 static void lapic_timer_state_broadcast(struct acpi_processor *pr, 197 struct acpi_processor_cx *cx, 198 int broadcast) 199 { 200 } 201 202 #endif 203 204 #if defined(CONFIG_X86) 205 static void tsc_check_state(int state) 206 { 207 switch (boot_cpu_data.x86_vendor) { 208 case X86_VENDOR_AMD: 209 case X86_VENDOR_INTEL: 210 case X86_VENDOR_CENTAUR: 211 /* 212 * AMD Fam10h TSC will tick in all 213 * C/P/S0/S1 states when this bit is set. 214 */ 215 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 216 return; 217 218 /*FALL THROUGH*/ 219 default: 220 /* TSC could halt in idle, so notify users */ 221 if (state > ACPI_STATE_C1) 222 mark_tsc_unstable("TSC halts in idle"); 223 } 224 } 225 #else 226 static void tsc_check_state(int state) { return; } 227 #endif 228 229 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 230 { 231 232 if (!pr->pblk) 233 return -ENODEV; 234 235 /* if info is obtained from pblk/fadt, type equals state */ 236 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 237 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 238 239 #ifndef CONFIG_HOTPLUG_CPU 240 /* 241 * Check for P_LVL2_UP flag before entering C2 and above on 242 * an SMP system. 243 */ 244 if ((num_online_cpus() > 1) && 245 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 246 return -ENODEV; 247 #endif 248 249 /* determine C2 and C3 address from pblk */ 250 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 251 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 252 253 /* determine latencies from FADT */ 254 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency; 255 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency; 256 257 /* 258 * FADT specified C2 latency must be less than or equal to 259 * 100 microseconds. 260 */ 261 if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 262 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 263 "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency)); 264 /* invalidate C2 */ 265 pr->power.states[ACPI_STATE_C2].address = 0; 266 } 267 268 /* 269 * FADT supplied C3 latency must be less than or equal to 270 * 1000 microseconds. 271 */ 272 if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 273 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 274 "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency)); 275 /* invalidate C3 */ 276 pr->power.states[ACPI_STATE_C3].address = 0; 277 } 278 279 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 280 "lvl2[0x%08x] lvl3[0x%08x]\n", 281 pr->power.states[ACPI_STATE_C2].address, 282 pr->power.states[ACPI_STATE_C3].address)); 283 284 return 0; 285 } 286 287 static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 288 { 289 if (!pr->power.states[ACPI_STATE_C1].valid) { 290 /* set the first C-State to C1 */ 291 /* all processors need to support C1 */ 292 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 293 pr->power.states[ACPI_STATE_C1].valid = 1; 294 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; 295 296 snprintf(pr->power.states[ACPI_STATE_C1].desc, 297 ACPI_CX_DESC_LEN, "ACPI HLT"); 298 } 299 /* the C0 state only exists as a filler in our array */ 300 pr->power.states[ACPI_STATE_C0].valid = 1; 301 return 0; 302 } 303 304 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 305 { 306 acpi_status status; 307 u64 count; 308 int current_count; 309 int i, ret = 0; 310 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 311 union acpi_object *cst; 312 313 if (nocst) 314 return -ENODEV; 315 316 current_count = 0; 317 318 status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 319 if (ACPI_FAILURE(status)) { 320 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); 321 return -ENODEV; 322 } 323 324 cst = buffer.pointer; 325 326 /* There must be at least 2 elements */ 327 if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { 328 pr_err("not enough elements in _CST\n"); 329 ret = -EFAULT; 330 goto end; 331 } 332 333 count = cst->package.elements[0].integer.value; 334 335 /* Validate number of power states. */ 336 if (count < 1 || count != cst->package.count - 1) { 337 pr_err("count given by _CST is not valid\n"); 338 ret = -EFAULT; 339 goto end; 340 } 341 342 /* Tell driver that at least _CST is supported. */ 343 pr->flags.has_cst = 1; 344 345 for (i = 1; i <= count; i++) { 346 union acpi_object *element; 347 union acpi_object *obj; 348 struct acpi_power_register *reg; 349 struct acpi_processor_cx cx; 350 351 memset(&cx, 0, sizeof(cx)); 352 353 element = &(cst->package.elements[i]); 354 if (element->type != ACPI_TYPE_PACKAGE) 355 continue; 356 357 if (element->package.count != 4) 358 continue; 359 360 obj = &(element->package.elements[0]); 361 362 if (obj->type != ACPI_TYPE_BUFFER) 363 continue; 364 365 reg = (struct acpi_power_register *)obj->buffer.pointer; 366 367 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 368 (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) 369 continue; 370 371 /* There should be an easy way to extract an integer... */ 372 obj = &(element->package.elements[1]); 373 if (obj->type != ACPI_TYPE_INTEGER) 374 continue; 375 376 cx.type = obj->integer.value; 377 /* 378 * Some buggy BIOSes won't list C1 in _CST - 379 * Let acpi_processor_get_power_info_default() handle them later 380 */ 381 if (i == 1 && cx.type != ACPI_STATE_C1) 382 current_count++; 383 384 cx.address = reg->address; 385 cx.index = current_count + 1; 386 387 cx.entry_method = ACPI_CSTATE_SYSTEMIO; 388 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 389 if (acpi_processor_ffh_cstate_probe 390 (pr->id, &cx, reg) == 0) { 391 cx.entry_method = ACPI_CSTATE_FFH; 392 } else if (cx.type == ACPI_STATE_C1) { 393 /* 394 * C1 is a special case where FIXED_HARDWARE 395 * can be handled in non-MWAIT way as well. 396 * In that case, save this _CST entry info. 397 * Otherwise, ignore this info and continue. 398 */ 399 cx.entry_method = ACPI_CSTATE_HALT; 400 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 401 } else { 402 continue; 403 } 404 if (cx.type == ACPI_STATE_C1 && 405 (boot_option_idle_override == IDLE_NOMWAIT)) { 406 /* 407 * In most cases the C1 space_id obtained from 408 * _CST object is FIXED_HARDWARE access mode. 409 * But when the option of idle=halt is added, 410 * the entry_method type should be changed from 411 * CSTATE_FFH to CSTATE_HALT. 412 * When the option of idle=nomwait is added, 413 * the C1 entry_method type should be 414 * CSTATE_HALT. 415 */ 416 cx.entry_method = ACPI_CSTATE_HALT; 417 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 418 } 419 } else { 420 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", 421 cx.address); 422 } 423 424 if (cx.type == ACPI_STATE_C1) { 425 cx.valid = 1; 426 } 427 428 obj = &(element->package.elements[2]); 429 if (obj->type != ACPI_TYPE_INTEGER) 430 continue; 431 432 cx.latency = obj->integer.value; 433 434 obj = &(element->package.elements[3]); 435 if (obj->type != ACPI_TYPE_INTEGER) 436 continue; 437 438 current_count++; 439 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); 440 441 /* 442 * We support total ACPI_PROCESSOR_MAX_POWER - 1 443 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) 444 */ 445 if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { 446 pr_warn("Limiting number of power states to max (%d)\n", 447 ACPI_PROCESSOR_MAX_POWER); 448 pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 449 break; 450 } 451 } 452 453 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", 454 current_count)); 455 456 /* Validate number of power states discovered */ 457 if (current_count < 2) 458 ret = -EFAULT; 459 460 end: 461 kfree(buffer.pointer); 462 463 return ret; 464 } 465 466 static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 467 struct acpi_processor_cx *cx) 468 { 469 static int bm_check_flag = -1; 470 static int bm_control_flag = -1; 471 472 473 if (!cx->address) 474 return; 475 476 /* 477 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 478 * DMA transfers are used by any ISA device to avoid livelock. 479 * Note that we could disable Type-F DMA (as recommended by 480 * the erratum), but this is known to disrupt certain ISA 481 * devices thus we take the conservative approach. 482 */ 483 else if (errata.piix4.fdma) { 484 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 485 "C3 not supported on PIIX4 with Type-F DMA\n")); 486 return; 487 } 488 489 /* All the logic here assumes flags.bm_check is same across all CPUs */ 490 if (bm_check_flag == -1) { 491 /* Determine whether bm_check is needed based on CPU */ 492 acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 493 bm_check_flag = pr->flags.bm_check; 494 bm_control_flag = pr->flags.bm_control; 495 } else { 496 pr->flags.bm_check = bm_check_flag; 497 pr->flags.bm_control = bm_control_flag; 498 } 499 500 if (pr->flags.bm_check) { 501 if (!pr->flags.bm_control) { 502 if (pr->flags.has_cst != 1) { 503 /* bus mastering control is necessary */ 504 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 505 "C3 support requires BM control\n")); 506 return; 507 } else { 508 /* Here we enter C3 without bus mastering */ 509 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 510 "C3 support without BM control\n")); 511 } 512 } 513 } else { 514 /* 515 * WBINVD should be set in fadt, for C3 state to be 516 * supported on when bm_check is not required. 517 */ 518 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { 519 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 520 "Cache invalidation should work properly" 521 " for C3 to be enabled on SMP systems\n")); 522 return; 523 } 524 } 525 526 /* 527 * Otherwise we've met all of our C3 requirements. 528 * Normalize the C3 latency to expidite policy. Enable 529 * checking of bus mastering status (bm_check) so we can 530 * use this in our C3 policy 531 */ 532 cx->valid = 1; 533 534 /* 535 * On older chipsets, BM_RLD needs to be set 536 * in order for Bus Master activity to wake the 537 * system from C3. Newer chipsets handle DMA 538 * during C3 automatically and BM_RLD is a NOP. 539 * In either case, the proper way to 540 * handle BM_RLD is to set it and leave it set. 541 */ 542 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 543 544 return; 545 } 546 547 static int acpi_processor_power_verify(struct acpi_processor *pr) 548 { 549 unsigned int i; 550 unsigned int working = 0; 551 552 pr->power.timer_broadcast_on_state = INT_MAX; 553 554 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 555 struct acpi_processor_cx *cx = &pr->power.states[i]; 556 557 switch (cx->type) { 558 case ACPI_STATE_C1: 559 cx->valid = 1; 560 break; 561 562 case ACPI_STATE_C2: 563 if (!cx->address) 564 break; 565 cx->valid = 1; 566 break; 567 568 case ACPI_STATE_C3: 569 acpi_processor_power_verify_c3(pr, cx); 570 break; 571 } 572 if (!cx->valid) 573 continue; 574 575 lapic_timer_check_state(i, pr, cx); 576 tsc_check_state(cx->type); 577 working++; 578 } 579 580 lapic_timer_propagate_broadcast(pr); 581 582 return (working); 583 } 584 585 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 586 { 587 unsigned int i; 588 int result; 589 590 591 /* NOTE: the idle thread may not be running while calling 592 * this function */ 593 594 /* Zero initialize all the C-states info. */ 595 memset(pr->power.states, 0, sizeof(pr->power.states)); 596 597 result = acpi_processor_get_power_info_cst(pr); 598 if (result == -ENODEV) 599 result = acpi_processor_get_power_info_fadt(pr); 600 601 if (result) 602 return result; 603 604 acpi_processor_get_power_info_default(pr); 605 606 pr->power.count = acpi_processor_power_verify(pr); 607 608 /* 609 * if one state of type C2 or C3 is available, mark this 610 * CPU as being "idle manageable" 611 */ 612 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 613 if (pr->power.states[i].valid) { 614 pr->power.count = i; 615 if (pr->power.states[i].type >= ACPI_STATE_C2) 616 pr->flags.power = 1; 617 } 618 } 619 620 return 0; 621 } 622 623 /** 624 * acpi_idle_bm_check - checks if bus master activity was detected 625 */ 626 static int acpi_idle_bm_check(void) 627 { 628 u32 bm_status = 0; 629 630 if (bm_check_disable) 631 return 0; 632 633 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 634 if (bm_status) 635 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 636 /* 637 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 638 * the true state of bus mastering activity; forcing us to 639 * manually check the BMIDEA bit of each IDE channel. 640 */ 641 else if (errata.piix4.bmisx) { 642 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 643 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 644 bm_status = 1; 645 } 646 return bm_status; 647 } 648 649 /** 650 * acpi_idle_do_entry - enter idle state using the appropriate method 651 * @cx: cstate data 652 * 653 * Caller disables interrupt before call and enables interrupt after return. 654 */ 655 static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) 656 { 657 if (cx->entry_method == ACPI_CSTATE_FFH) { 658 /* Call into architectural FFH based C-state */ 659 acpi_processor_ffh_cstate_enter(cx); 660 } else if (cx->entry_method == ACPI_CSTATE_HALT) { 661 acpi_safe_halt(); 662 } else { 663 /* IO port based C-state */ 664 inb(cx->address); 665 /* Dummy wait op - must do something useless after P_LVL2 read 666 because chipsets cannot guarantee that STPCLK# signal 667 gets asserted in time to freeze execution properly. */ 668 inl(acpi_gbl_FADT.xpm_timer_block.address); 669 } 670 } 671 672 /** 673 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) 674 * @dev: the target CPU 675 * @index: the index of suggested state 676 */ 677 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) 678 { 679 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 680 681 ACPI_FLUSH_CPU_CACHE(); 682 683 while (1) { 684 685 if (cx->entry_method == ACPI_CSTATE_HALT) 686 safe_halt(); 687 else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { 688 inb(cx->address); 689 /* See comment in acpi_idle_do_entry() */ 690 inl(acpi_gbl_FADT.xpm_timer_block.address); 691 } else 692 return -ENODEV; 693 } 694 695 /* Never reached */ 696 return 0; 697 } 698 699 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) 700 { 701 return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst && 702 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED); 703 } 704 705 static int c3_cpu_count; 706 static DEFINE_RAW_SPINLOCK(c3_lock); 707 708 /** 709 * acpi_idle_enter_bm - enters C3 with proper BM handling 710 * @pr: Target processor 711 * @cx: Target state context 712 * @timer_bc: Whether or not to change timer mode to broadcast 713 */ 714 static void acpi_idle_enter_bm(struct acpi_processor *pr, 715 struct acpi_processor_cx *cx, bool timer_bc) 716 { 717 acpi_unlazy_tlb(smp_processor_id()); 718 719 /* 720 * Must be done before busmaster disable as we might need to 721 * access HPET ! 722 */ 723 if (timer_bc) 724 lapic_timer_state_broadcast(pr, cx, 1); 725 726 /* 727 * disable bus master 728 * bm_check implies we need ARB_DIS 729 * bm_control implies whether we can do ARB_DIS 730 * 731 * That leaves a case where bm_check is set and bm_control is 732 * not set. In that case we cannot do much, we enter C3 733 * without doing anything. 734 */ 735 if (pr->flags.bm_control) { 736 raw_spin_lock(&c3_lock); 737 c3_cpu_count++; 738 /* Disable bus master arbitration when all CPUs are in C3 */ 739 if (c3_cpu_count == num_online_cpus()) 740 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); 741 raw_spin_unlock(&c3_lock); 742 } 743 744 acpi_idle_do_entry(cx); 745 746 /* Re-enable bus master arbitration */ 747 if (pr->flags.bm_control) { 748 raw_spin_lock(&c3_lock); 749 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); 750 c3_cpu_count--; 751 raw_spin_unlock(&c3_lock); 752 } 753 754 if (timer_bc) 755 lapic_timer_state_broadcast(pr, cx, 0); 756 } 757 758 static int acpi_idle_enter(struct cpuidle_device *dev, 759 struct cpuidle_driver *drv, int index) 760 { 761 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 762 struct acpi_processor *pr; 763 764 pr = __this_cpu_read(processors); 765 if (unlikely(!pr)) 766 return -EINVAL; 767 768 if (cx->type != ACPI_STATE_C1) { 769 if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { 770 index = ACPI_IDLE_STATE_START; 771 cx = per_cpu(acpi_cstate[index], dev->cpu); 772 } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { 773 if (cx->bm_sts_skip || !acpi_idle_bm_check()) { 774 acpi_idle_enter_bm(pr, cx, true); 775 return index; 776 } else if (drv->safe_state_index >= 0) { 777 index = drv->safe_state_index; 778 cx = per_cpu(acpi_cstate[index], dev->cpu); 779 } else { 780 acpi_safe_halt(); 781 return -EBUSY; 782 } 783 } 784 } 785 786 lapic_timer_state_broadcast(pr, cx, 1); 787 788 if (cx->type == ACPI_STATE_C3) 789 ACPI_FLUSH_CPU_CACHE(); 790 791 acpi_idle_do_entry(cx); 792 793 lapic_timer_state_broadcast(pr, cx, 0); 794 795 return index; 796 } 797 798 static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, 799 struct cpuidle_driver *drv, int index) 800 { 801 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 802 803 if (cx->type == ACPI_STATE_C3) { 804 struct acpi_processor *pr = __this_cpu_read(processors); 805 806 if (unlikely(!pr)) 807 return; 808 809 if (pr->flags.bm_check) { 810 acpi_idle_enter_bm(pr, cx, false); 811 return; 812 } else { 813 ACPI_FLUSH_CPU_CACHE(); 814 } 815 } 816 acpi_idle_do_entry(cx); 817 } 818 819 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 820 struct cpuidle_device *dev) 821 { 822 int i, count = ACPI_IDLE_STATE_START; 823 struct acpi_processor_cx *cx; 824 825 if (max_cstate == 0) 826 max_cstate = 1; 827 828 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 829 cx = &pr->power.states[i]; 830 831 if (!cx->valid) 832 continue; 833 834 per_cpu(acpi_cstate[count], dev->cpu) = cx; 835 836 count++; 837 if (count == CPUIDLE_STATE_MAX) 838 break; 839 } 840 841 if (!count) 842 return -EINVAL; 843 844 return 0; 845 } 846 847 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 848 { 849 int i, count; 850 struct acpi_processor_cx *cx; 851 struct cpuidle_state *state; 852 struct cpuidle_driver *drv = &acpi_idle_driver; 853 854 if (max_cstate == 0) 855 max_cstate = 1; 856 857 if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) { 858 cpuidle_poll_state_init(drv); 859 count = 1; 860 } else { 861 count = 0; 862 } 863 864 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 865 cx = &pr->power.states[i]; 866 867 if (!cx->valid) 868 continue; 869 870 state = &drv->states[count]; 871 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 872 strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 873 state->exit_latency = cx->latency; 874 state->target_residency = cx->latency * latency_factor; 875 state->enter = acpi_idle_enter; 876 877 state->flags = 0; 878 if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) { 879 state->enter_dead = acpi_idle_play_dead; 880 drv->safe_state_index = count; 881 } 882 /* 883 * Halt-induced C1 is not good for ->enter_s2idle, because it 884 * re-enables interrupts on exit. Moreover, C1 is generally not 885 * particularly interesting from the suspend-to-idle angle, so 886 * avoid C1 and the situations in which we may need to fall back 887 * to it altogether. 888 */ 889 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) 890 state->enter_s2idle = acpi_idle_enter_s2idle; 891 892 count++; 893 if (count == CPUIDLE_STATE_MAX) 894 break; 895 } 896 897 drv->state_count = count; 898 899 if (!count) 900 return -EINVAL; 901 902 return 0; 903 } 904 905 static inline void acpi_processor_cstate_first_run_checks(void) 906 { 907 acpi_status status; 908 static int first_run; 909 910 if (first_run) 911 return; 912 dmi_check_system(processor_power_dmi_table); 913 max_cstate = acpi_processor_cstate_check(max_cstate); 914 if (max_cstate < ACPI_C_STATES_MAX) 915 pr_notice("ACPI: processor limited to max C-state %d\n", 916 max_cstate); 917 first_run++; 918 919 if (acpi_gbl_FADT.cst_control && !nocst) { 920 status = acpi_os_write_port(acpi_gbl_FADT.smi_command, 921 acpi_gbl_FADT.cst_control, 8); 922 if (ACPI_FAILURE(status)) 923 ACPI_EXCEPTION((AE_INFO, status, 924 "Notifying BIOS of _CST ability failed")); 925 } 926 } 927 #else 928 929 static inline int disabled_by_idle_boot_param(void) { return 0; } 930 static inline void acpi_processor_cstate_first_run_checks(void) { } 931 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 932 { 933 return -ENODEV; 934 } 935 936 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 937 struct cpuidle_device *dev) 938 { 939 return -EINVAL; 940 } 941 942 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 943 { 944 return -EINVAL; 945 } 946 947 #endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ 948 949 struct acpi_lpi_states_array { 950 unsigned int size; 951 unsigned int composite_states_size; 952 struct acpi_lpi_state *entries; 953 struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER]; 954 }; 955 956 static int obj_get_integer(union acpi_object *obj, u32 *value) 957 { 958 if (obj->type != ACPI_TYPE_INTEGER) 959 return -EINVAL; 960 961 *value = obj->integer.value; 962 return 0; 963 } 964 965 static int acpi_processor_evaluate_lpi(acpi_handle handle, 966 struct acpi_lpi_states_array *info) 967 { 968 acpi_status status; 969 int ret = 0; 970 int pkg_count, state_idx = 1, loop; 971 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 972 union acpi_object *lpi_data; 973 struct acpi_lpi_state *lpi_state; 974 975 status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer); 976 if (ACPI_FAILURE(status)) { 977 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n")); 978 return -ENODEV; 979 } 980 981 lpi_data = buffer.pointer; 982 983 /* There must be at least 4 elements = 3 elements + 1 package */ 984 if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE || 985 lpi_data->package.count < 4) { 986 pr_debug("not enough elements in _LPI\n"); 987 ret = -ENODATA; 988 goto end; 989 } 990 991 pkg_count = lpi_data->package.elements[2].integer.value; 992 993 /* Validate number of power states. */ 994 if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) { 995 pr_debug("count given by _LPI is not valid\n"); 996 ret = -ENODATA; 997 goto end; 998 } 999 1000 lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL); 1001 if (!lpi_state) { 1002 ret = -ENOMEM; 1003 goto end; 1004 } 1005 1006 info->size = pkg_count; 1007 info->entries = lpi_state; 1008 1009 /* LPI States start at index 3 */ 1010 for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) { 1011 union acpi_object *element, *pkg_elem, *obj; 1012 1013 element = &lpi_data->package.elements[loop]; 1014 if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7) 1015 continue; 1016 1017 pkg_elem = element->package.elements; 1018 1019 obj = pkg_elem + 6; 1020 if (obj->type == ACPI_TYPE_BUFFER) { 1021 struct acpi_power_register *reg; 1022 1023 reg = (struct acpi_power_register *)obj->buffer.pointer; 1024 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 1025 reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) 1026 continue; 1027 1028 lpi_state->address = reg->address; 1029 lpi_state->entry_method = 1030 reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ? 1031 ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO; 1032 } else if (obj->type == ACPI_TYPE_INTEGER) { 1033 lpi_state->entry_method = ACPI_CSTATE_INTEGER; 1034 lpi_state->address = obj->integer.value; 1035 } else { 1036 continue; 1037 } 1038 1039 /* elements[7,8] skipped for now i.e. Residency/Usage counter*/ 1040 1041 obj = pkg_elem + 9; 1042 if (obj->type == ACPI_TYPE_STRING) 1043 strlcpy(lpi_state->desc, obj->string.pointer, 1044 ACPI_CX_DESC_LEN); 1045 1046 lpi_state->index = state_idx; 1047 if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) { 1048 pr_debug("No min. residency found, assuming 10 us\n"); 1049 lpi_state->min_residency = 10; 1050 } 1051 1052 if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) { 1053 pr_debug("No wakeup residency found, assuming 10 us\n"); 1054 lpi_state->wake_latency = 10; 1055 } 1056 1057 if (obj_get_integer(pkg_elem + 2, &lpi_state->flags)) 1058 lpi_state->flags = 0; 1059 1060 if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags)) 1061 lpi_state->arch_flags = 0; 1062 1063 if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq)) 1064 lpi_state->res_cnt_freq = 1; 1065 1066 if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state)) 1067 lpi_state->enable_parent_state = 0; 1068 } 1069 1070 acpi_handle_debug(handle, "Found %d power states\n", state_idx); 1071 end: 1072 kfree(buffer.pointer); 1073 return ret; 1074 } 1075 1076 /* 1077 * flat_state_cnt - the number of composite LPI states after the process of flattening 1078 */ 1079 static int flat_state_cnt; 1080 1081 /** 1082 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state 1083 * 1084 * @local: local LPI state 1085 * @parent: parent LPI state 1086 * @result: composite LPI state 1087 */ 1088 static bool combine_lpi_states(struct acpi_lpi_state *local, 1089 struct acpi_lpi_state *parent, 1090 struct acpi_lpi_state *result) 1091 { 1092 if (parent->entry_method == ACPI_CSTATE_INTEGER) { 1093 if (!parent->address) /* 0 means autopromotable */ 1094 return false; 1095 result->address = local->address + parent->address; 1096 } else { 1097 result->address = parent->address; 1098 } 1099 1100 result->min_residency = max(local->min_residency, parent->min_residency); 1101 result->wake_latency = local->wake_latency + parent->wake_latency; 1102 result->enable_parent_state = parent->enable_parent_state; 1103 result->entry_method = local->entry_method; 1104 1105 result->flags = parent->flags; 1106 result->arch_flags = parent->arch_flags; 1107 result->index = parent->index; 1108 1109 strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN); 1110 strlcat(result->desc, "+", ACPI_CX_DESC_LEN); 1111 strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); 1112 return true; 1113 } 1114 1115 #define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0) 1116 1117 static void stash_composite_state(struct acpi_lpi_states_array *curr_level, 1118 struct acpi_lpi_state *t) 1119 { 1120 curr_level->composite_states[curr_level->composite_states_size++] = t; 1121 } 1122 1123 static int flatten_lpi_states(struct acpi_processor *pr, 1124 struct acpi_lpi_states_array *curr_level, 1125 struct acpi_lpi_states_array *prev_level) 1126 { 1127 int i, j, state_count = curr_level->size; 1128 struct acpi_lpi_state *p, *t = curr_level->entries; 1129 1130 curr_level->composite_states_size = 0; 1131 for (j = 0; j < state_count; j++, t++) { 1132 struct acpi_lpi_state *flpi; 1133 1134 if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED)) 1135 continue; 1136 1137 if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) { 1138 pr_warn("Limiting number of LPI states to max (%d)\n", 1139 ACPI_PROCESSOR_MAX_POWER); 1140 pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 1141 break; 1142 } 1143 1144 flpi = &pr->power.lpi_states[flat_state_cnt]; 1145 1146 if (!prev_level) { /* leaf/processor node */ 1147 memcpy(flpi, t, sizeof(*t)); 1148 stash_composite_state(curr_level, flpi); 1149 flat_state_cnt++; 1150 continue; 1151 } 1152 1153 for (i = 0; i < prev_level->composite_states_size; i++) { 1154 p = prev_level->composite_states[i]; 1155 if (t->index <= p->enable_parent_state && 1156 combine_lpi_states(p, t, flpi)) { 1157 stash_composite_state(curr_level, flpi); 1158 flat_state_cnt++; 1159 flpi++; 1160 } 1161 } 1162 } 1163 1164 kfree(curr_level->entries); 1165 return 0; 1166 } 1167 1168 static int acpi_processor_get_lpi_info(struct acpi_processor *pr) 1169 { 1170 int ret, i; 1171 acpi_status status; 1172 acpi_handle handle = pr->handle, pr_ahandle; 1173 struct acpi_device *d = NULL; 1174 struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; 1175 1176 if (!osc_pc_lpi_support_confirmed) 1177 return -EOPNOTSUPP; 1178 1179 if (!acpi_has_method(handle, "_LPI")) 1180 return -EINVAL; 1181 1182 flat_state_cnt = 0; 1183 prev = &info[0]; 1184 curr = &info[1]; 1185 handle = pr->handle; 1186 ret = acpi_processor_evaluate_lpi(handle, prev); 1187 if (ret) 1188 return ret; 1189 flatten_lpi_states(pr, prev, NULL); 1190 1191 status = acpi_get_parent(handle, &pr_ahandle); 1192 while (ACPI_SUCCESS(status)) { 1193 acpi_bus_get_device(pr_ahandle, &d); 1194 handle = pr_ahandle; 1195 1196 if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID)) 1197 break; 1198 1199 /* can be optional ? */ 1200 if (!acpi_has_method(handle, "_LPI")) 1201 break; 1202 1203 ret = acpi_processor_evaluate_lpi(handle, curr); 1204 if (ret) 1205 break; 1206 1207 /* flatten all the LPI states in this level of hierarchy */ 1208 flatten_lpi_states(pr, curr, prev); 1209 1210 tmp = prev, prev = curr, curr = tmp; 1211 1212 status = acpi_get_parent(handle, &pr_ahandle); 1213 } 1214 1215 pr->power.count = flat_state_cnt; 1216 /* reset the index after flattening */ 1217 for (i = 0; i < pr->power.count; i++) 1218 pr->power.lpi_states[i].index = i; 1219 1220 /* Tell driver that _LPI is supported. */ 1221 pr->flags.has_lpi = 1; 1222 pr->flags.power = 1; 1223 1224 return 0; 1225 } 1226 1227 int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) 1228 { 1229 return -ENODEV; 1230 } 1231 1232 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) 1233 { 1234 return -ENODEV; 1235 } 1236 1237 /** 1238 * acpi_idle_lpi_enter - enters an ACPI any LPI state 1239 * @dev: the target CPU 1240 * @drv: cpuidle driver containing cpuidle state info 1241 * @index: index of target state 1242 * 1243 * Return: 0 for success or negative value for error 1244 */ 1245 static int acpi_idle_lpi_enter(struct cpuidle_device *dev, 1246 struct cpuidle_driver *drv, int index) 1247 { 1248 struct acpi_processor *pr; 1249 struct acpi_lpi_state *lpi; 1250 1251 pr = __this_cpu_read(processors); 1252 1253 if (unlikely(!pr)) 1254 return -EINVAL; 1255 1256 lpi = &pr->power.lpi_states[index]; 1257 if (lpi->entry_method == ACPI_CSTATE_FFH) 1258 return acpi_processor_ffh_lpi_enter(lpi); 1259 1260 return -EINVAL; 1261 } 1262 1263 static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) 1264 { 1265 int i; 1266 struct acpi_lpi_state *lpi; 1267 struct cpuidle_state *state; 1268 struct cpuidle_driver *drv = &acpi_idle_driver; 1269 1270 if (!pr->flags.has_lpi) 1271 return -EOPNOTSUPP; 1272 1273 for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) { 1274 lpi = &pr->power.lpi_states[i]; 1275 1276 state = &drv->states[i]; 1277 snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); 1278 strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); 1279 state->exit_latency = lpi->wake_latency; 1280 state->target_residency = lpi->min_residency; 1281 if (lpi->arch_flags) 1282 state->flags |= CPUIDLE_FLAG_TIMER_STOP; 1283 state->enter = acpi_idle_lpi_enter; 1284 drv->safe_state_index = i; 1285 } 1286 1287 drv->state_count = i; 1288 1289 return 0; 1290 } 1291 1292 /** 1293 * acpi_processor_setup_cpuidle_states- prepares and configures cpuidle 1294 * global state data i.e. idle routines 1295 * 1296 * @pr: the ACPI processor 1297 */ 1298 static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) 1299 { 1300 int i; 1301 struct cpuidle_driver *drv = &acpi_idle_driver; 1302 1303 if (!pr->flags.power_setup_done || !pr->flags.power) 1304 return -EINVAL; 1305 1306 drv->safe_state_index = -1; 1307 for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) { 1308 drv->states[i].name[0] = '\0'; 1309 drv->states[i].desc[0] = '\0'; 1310 } 1311 1312 if (pr->flags.has_lpi) 1313 return acpi_processor_setup_lpi_states(pr); 1314 1315 return acpi_processor_setup_cstates(pr); 1316 } 1317 1318 /** 1319 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE 1320 * device i.e. per-cpu data 1321 * 1322 * @pr: the ACPI processor 1323 * @dev : the cpuidle device 1324 */ 1325 static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr, 1326 struct cpuidle_device *dev) 1327 { 1328 if (!pr->flags.power_setup_done || !pr->flags.power || !dev) 1329 return -EINVAL; 1330 1331 dev->cpu = pr->id; 1332 if (pr->flags.has_lpi) 1333 return acpi_processor_ffh_lpi_probe(pr->id); 1334 1335 return acpi_processor_setup_cpuidle_cx(pr, dev); 1336 } 1337 1338 static int acpi_processor_get_power_info(struct acpi_processor *pr) 1339 { 1340 int ret; 1341 1342 ret = acpi_processor_get_lpi_info(pr); 1343 if (ret) 1344 ret = acpi_processor_get_cstate_info(pr); 1345 1346 return ret; 1347 } 1348 1349 int acpi_processor_hotplug(struct acpi_processor *pr) 1350 { 1351 int ret = 0; 1352 struct cpuidle_device *dev; 1353 1354 if (disabled_by_idle_boot_param()) 1355 return 0; 1356 1357 if (!pr->flags.power_setup_done) 1358 return -ENODEV; 1359 1360 dev = per_cpu(acpi_cpuidle_device, pr->id); 1361 cpuidle_pause_and_lock(); 1362 cpuidle_disable_device(dev); 1363 ret = acpi_processor_get_power_info(pr); 1364 if (!ret && pr->flags.power) { 1365 acpi_processor_setup_cpuidle_dev(pr, dev); 1366 ret = cpuidle_enable_device(dev); 1367 } 1368 cpuidle_resume_and_unlock(); 1369 1370 return ret; 1371 } 1372 1373 int acpi_processor_power_state_has_changed(struct acpi_processor *pr) 1374 { 1375 int cpu; 1376 struct acpi_processor *_pr; 1377 struct cpuidle_device *dev; 1378 1379 if (disabled_by_idle_boot_param()) 1380 return 0; 1381 1382 if (!pr->flags.power_setup_done) 1383 return -ENODEV; 1384 1385 /* 1386 * FIXME: Design the ACPI notification to make it once per 1387 * system instead of once per-cpu. This condition is a hack 1388 * to make the code that updates C-States be called once. 1389 */ 1390 1391 if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { 1392 1393 /* Protect against cpu-hotplug */ 1394 get_online_cpus(); 1395 cpuidle_pause_and_lock(); 1396 1397 /* Disable all cpuidle devices */ 1398 for_each_online_cpu(cpu) { 1399 _pr = per_cpu(processors, cpu); 1400 if (!_pr || !_pr->flags.power_setup_done) 1401 continue; 1402 dev = per_cpu(acpi_cpuidle_device, cpu); 1403 cpuidle_disable_device(dev); 1404 } 1405 1406 /* Populate Updated C-state information */ 1407 acpi_processor_get_power_info(pr); 1408 acpi_processor_setup_cpuidle_states(pr); 1409 1410 /* Enable all cpuidle devices */ 1411 for_each_online_cpu(cpu) { 1412 _pr = per_cpu(processors, cpu); 1413 if (!_pr || !_pr->flags.power_setup_done) 1414 continue; 1415 acpi_processor_get_power_info(_pr); 1416 if (_pr->flags.power) { 1417 dev = per_cpu(acpi_cpuidle_device, cpu); 1418 acpi_processor_setup_cpuidle_dev(_pr, dev); 1419 cpuidle_enable_device(dev); 1420 } 1421 } 1422 cpuidle_resume_and_unlock(); 1423 put_online_cpus(); 1424 } 1425 1426 return 0; 1427 } 1428 1429 static int acpi_processor_registered; 1430 1431 int acpi_processor_power_init(struct acpi_processor *pr) 1432 { 1433 int retval; 1434 struct cpuidle_device *dev; 1435 1436 if (disabled_by_idle_boot_param()) 1437 return 0; 1438 1439 acpi_processor_cstate_first_run_checks(); 1440 1441 if (!acpi_processor_get_power_info(pr)) 1442 pr->flags.power_setup_done = 1; 1443 1444 /* 1445 * Install the idle handler if processor power management is supported. 1446 * Note that we use previously set idle handler will be used on 1447 * platforms that only support C1. 1448 */ 1449 if (pr->flags.power) { 1450 /* Register acpi_idle_driver if not already registered */ 1451 if (!acpi_processor_registered) { 1452 acpi_processor_setup_cpuidle_states(pr); 1453 retval = cpuidle_register_driver(&acpi_idle_driver); 1454 if (retval) 1455 return retval; 1456 pr_debug("%s registered with cpuidle\n", 1457 acpi_idle_driver.name); 1458 } 1459 1460 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1461 if (!dev) 1462 return -ENOMEM; 1463 per_cpu(acpi_cpuidle_device, pr->id) = dev; 1464 1465 acpi_processor_setup_cpuidle_dev(pr, dev); 1466 1467 /* Register per-cpu cpuidle_device. Cpuidle driver 1468 * must already be registered before registering device 1469 */ 1470 retval = cpuidle_register_device(dev); 1471 if (retval) { 1472 if (acpi_processor_registered == 0) 1473 cpuidle_unregister_driver(&acpi_idle_driver); 1474 return retval; 1475 } 1476 acpi_processor_registered++; 1477 } 1478 return 0; 1479 } 1480 1481 int acpi_processor_power_exit(struct acpi_processor *pr) 1482 { 1483 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); 1484 1485 if (disabled_by_idle_boot_param()) 1486 return 0; 1487 1488 if (pr->flags.power) { 1489 cpuidle_unregister_device(dev); 1490 acpi_processor_registered--; 1491 if (acpi_processor_registered == 0) 1492 cpuidle_unregister_driver(&acpi_idle_driver); 1493 } 1494 1495 pr->flags.power_setup_done = 0; 1496 return 0; 1497 } 1498