/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>

#include "powernv.h"
#include "subcore.h"

/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR 855

static u32 supported_cpuidle_states;

/*
 * The default stop state that will be used by ppc_md.power_save
 * function on platforms that support stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * First deep stop state. Used to figure out when to save/restore
 * hypervisor context.
 */
u64 pnv_first_deep_stop_state = MAX_STOP_STATE;

/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric
	 * across all CPUs at boot, so read them on the current CPU and
	 * use the same values everywhere.
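	 *
	 * These values are handed to firmware through the OPAL stop-api
	 * (the opal_slw_set_reg() calls below) so that the platform can
	 * restore them when a thread wakes from a deep stop state that
	 * loses full hypervisor context.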
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_possible_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)&paca[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);
			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only p8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}

static void pnv_alloc_idle_core_states(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	u32 *core_idle_state;

	/*
	 * core_idle_state - The lower 8 bits track the idle state of
	 * each thread of the core.
	 *
	 * The most significant bit is the lock bit.
	 *
	 * Initially all the bits corresponding to threads_per_core
	 * are set. They are cleared when the thread enters deep idle
	 * state like sleep and winkle/stop.
	 *
	 * Initially the lock bit is cleared. The lock bit has 2
	 * purposes:
	 *	a. While the first thread in the core waking up from
	 *	   idle is restoring core state, it prevents other
	 *	   threads in the core from switching to process
	 *	   context.
	 *	b. While the last thread in the core is saving the
	 *	   core state, it prevents a different thread from
	 *	   waking up.
	 */
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);
		size_t paca_ptr_array_size;

		core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
		*core_idle_state = (1 << threads_per_core) - 1;
		paca_ptr_array_size = (threads_per_core *
				       sizeof(struct paca_struct *));

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca[cpu].core_idle_state_ptr = core_idle_state;
			paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
			paca[cpu].thread_mask = 1 << j;
			if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
				continue;
			paca[cpu].thread_sibling_pacas =
				kmalloc_node(paca_ptr_array_size,
					     GFP_KERNEL, node);
		}
	}

	update_subcore_sibling_mask();

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		int rc = pnv_save_sprs_for_deep_states();

		if (likely(!rc))
			return;

		/*
		 * The stop-api is unable to restore hypervisor
		 * resources on wakeup from platform idle states which
		 * lose full context. So disable such states.
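		 * (A state that loses full context depends entirely on
		 * the firmware-side restore that
		 * pnv_save_sprs_for_deep_states() tried to set up above;
		 * without it, wakeup from such a state would leave the
		 * thread unrecoverable.)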
		 */
		supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
		pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
		pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

		if (cpu_has_feature(CPU_FTR_ARCH_300) &&
		    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
			/*
			 * Use the default stop state for CPU-Hotplug
			 * if available.
			 */
			if (default_stop_found) {
				pnv_deepest_stop_psscr_val =
					pnv_default_stop_val;
				pnv_deepest_stop_psscr_mask =
					pnv_default_stop_mask;
				pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
					pnv_deepest_stop_psscr_val);
			} else { /* Fallback to snooze loop for CPU-Hotplug */
				deepest_stop_found = false;
				pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
			}
		}
	}
}

u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);

static void pnv_fastsleep_workaround_apply(void *info)
{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies that the fastsleep
	 * workaround needs to be left in 'applied' state on all
	 * the cores. Do this by:
	 * 1. Patching out the call to 'undo' workaround in fastsleep exit path
	 * 2. Sending an IPI to all the cores which have at least one online
	 *    thread
	 * 3. Patching out the call to 'apply' workaround in fastsleep entry
	 *    path
	 * There is no need to send an IPI to cores which have all threads
	 * offlined, as the last thread of the core entering fastsleep or a
	 * deeper state would have applied the workaround.
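	 *
	 * Note the ordering below: the exit-path call is patched out
	 * first, so that a core cycling through fastsleep while the IPIs
	 * are in flight cannot undo the workaround after it has been
	 * applied.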
	 */
	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_exit,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit");
		goto fail;
	}

	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
			 pnv_fastsleep_workaround_apply,
			 &err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
	}

	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_entry,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry");
		goto fail;
	}

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
		   show_fastsleep_workaround_applyonce,
		   store_fastsleep_workaround_applyonce);

static unsigned long __power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return 0;

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	return srr1;
}

void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	srr1 = __power7_idle_type(type);
	irq_set_pending_from_srr1(srr1);
}

void power7_idle(void)
{
	if (!powersave_nap)
		return;

	power7_idle_type(PNV_THREAD_NAP);
}

static unsigned long __power9_idle_type(unsigned long stop_psscr_val,
					unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return 0;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	return srr1;
}

void power9_idle_type(unsigned long stop_psscr_val,
		      unsigned long stop_psscr_mask)
{
	unsigned long srr1;

	srr1 = __power9_idle_type(stop_psscr_val, stop_psscr_mask);
	irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
void power9_idle(void)
{
	power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_HOTPLUG_CPU
static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);
	opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}

/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline. Called with
 * interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();
	u64 lpcr_val;

	/*
	 * We don't want to take decrementer interrupts while we are
	 * offline, so clear LPCR:PECE1. We keep PECE2 (and
	 * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
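	 * (The PECE bits in the LPCR are the Power-saving-mode Exit
	 * Cause Enable bits: each selects a class of events that may
	 * wake the thread from a power-saving state.)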
	 *
	 * If the CPU gets woken up by a special wakeup, ensure that
	 * the SLW engine sets LPCR with decrementer bit cleared, else
	 * the CPU will come back to the kernel due to a spurious
	 * wakeup.
	 */
	lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
			pnv_deepest_stop_psscr_val;
		srr1 = power9_idle_stop(psscr);

	} else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
		   (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_NAP);
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	/*
	 * Re-enable decrementer interrupts in LPCR.
	 *
	 * Further, we want stop states to be woken up by decrementer
	 * for non-hotplug cases. So program the LPCR via stop api as
	 * well.
	 */
	lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	return srr1;
}
#endif

/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since stop instruction
 *	was last executed.
 *
 *	Bit 41 - Status Disable (SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level
 *	Used to specify which power-saving level must be entered on executing
 *	stop instruction
 */

int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware.
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		   GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}

/*
 * pnv_power9_idle_init: Initializes the default idle state, first
 *                       deep idle state and deepest idle state on
 *                       ISA 3.0 CPUs.
 *
 * @np: /ibm,opal/power-mgt device node
 * @flags: cpu-idle-state-flags array
 * @dt_idle_states: Number of idle state entries
 * Returns 0 on success
 */
static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
				       int dt_idle_states)
{
	u64 *psscr_val = NULL;
	u64 *psscr_mask = NULL;
	u32 *residency_ns = NULL;
	u64 max_residency_ns = 0;
	int rc = 0, i;

	psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), GFP_KERNEL);
	psscr_mask = kcalloc(dt_idle_states, sizeof(*psscr_mask), GFP_KERNEL);
	residency_ns = kcalloc(dt_idle_states, sizeof(*residency_ns),
			       GFP_KERNEL);

	if (!psscr_val || !psscr_mask || !residency_ns) {
		rc = -1;
		goto out;
	}

	if (of_property_read_u64_array(np,
				       "ibm,cpu-idle-state-psscr",
				       psscr_val, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
		rc = -1;
		goto out;
	}

	if (of_property_read_u64_array(np,
				       "ibm,cpu-idle-state-psscr-mask",
				       psscr_mask, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
		rc = -1;
		goto out;
	}

	if (of_property_read_u32_array(np,
				       "ibm,cpu-idle-state-residency-ns",
				       residency_ns, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
		rc = -1;
		goto out;
	}

	/*
	 * Set pnv_first_deep_stop_state, pnv_deepest_stop_psscr_{val,mask},
	 * and the pnv_default_stop_{val,mask}.
	 *
	 * pnv_first_deep_stop_state should be set to the first stop
	 * level to cause hypervisor state loss.
	 *
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the shallowest (OPAL_PM_STOP_INST_FAST) loss-less stop state.
	 */
	pnv_first_deep_stop_state = MAX_STOP_STATE;
	for (i = 0; i < dt_idle_states; i++) {
		int err;
		u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;

		if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
		    (pnv_first_deep_stop_state > psscr_rl))
			pnv_first_deep_stop_state = psscr_rl;

		err = validate_psscr_val_mask(&psscr_val[i], &psscr_mask[i],
					      flags[i]);
		if (err) {
			report_invalid_psscr_val(psscr_val[i], err);
			continue;
		}

		if (max_residency_ns < residency_ns[i]) {
			max_residency_ns = residency_ns[i];
			pnv_deepest_stop_psscr_val = psscr_val[i];
			pnv_deepest_stop_psscr_mask = psscr_mask[i];
			pnv_deepest_stop_flag = flags[i];
			deepest_stop_found = true;
		}

		if (!default_stop_found &&
		    (flags[i] & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = psscr_val[i];
			pnv_default_stop_mask = psscr_mask[i];
			default_stop_found = true;
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = power9_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx, mask = 0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait\n");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx, mask = 0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: Requested Level (RL) value of first deep stop = 0x%llx\n",
		pnv_first_deep_stop_state);
out:
	kfree(psscr_val);
	kfree(psscr_mask);
	kfree(residency_ns);
	return rc;
}

/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
	struct device_node *np;
	int dt_idle_states;
	u32 *flags = NULL;
	int i;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}
	dt_idle_states = of_property_count_u32_elems(np,
			"ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);

	if (of_property_read_u32_array(np,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (pnv_power9_idle_init(np, flags, dt_idle_states))
			goto out;
	}

	for (i = 0; i < dt_idle_states; i++)
		supported_cpuidle_states |= flags[i];

out:
	kfree(flags);
}

static int __init pnv_init_idle_states(void)
{
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;

	pnv_probe_idle_states();

	if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_entry,
			PPC_INST_NOP);
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_exit,
			PPC_INST_NOP);
	} else {
		/*
		 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
		 * workaround is needed to use fastsleep. Provide sysfs
		 * control to choose how this workaround has to be applied.
		 */
		device_create_file(cpu_subsys.dev_root,
				   &dev_attr_fastsleep_workaround_applyonce);
	}

	pnv_alloc_idle_core_states();

	/*
	 * For each CPU, record its PACA address in each of its
	 * sibling thread's PACA at the slot corresponding to this
	 * CPU's index in the core.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		int cpu;

		pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
		for_each_possible_cpu(cpu) {
			int base_cpu = cpu_first_thread_sibling(cpu);
			int idx = cpu_thread_in_core(cpu);
			int i;

			for (i = 0; i < threads_per_core; i++) {
				int j = base_cpu + i;

				paca[j].thread_sibling_pacas[idx] = &paca[cpu];
			}
		}
	}

	if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
		ppc_md.power_save = power7_idle;

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);