// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];

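/* Conventional MIPS register names for GPRs $0..$31, used as uasm operands */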
enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
	return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	if (!coupled_coherence)
		return;

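	/*
	 * First barrier phase: each coupled VPE increments the counter & then
	 * waits for all online VPEs to arrive.
	 */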
	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < online)
		cpu_relax();

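	/*
	 * Second phase: each VPE increments again. The last to arrive resets
	 * the counter to zero & returns; the others wait for that reset
	 * before returning.
	 */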
	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > online)
		cpu_relax();
}

int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = cpu_core(&current_cpu_data);
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_atomic();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;
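	/*
	 * The generated entry code accesses ready_count whilst the core is
	 * non-coherent, so it must use this non-coherent mapping rather than
	 * the normal coherent one.
	 */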

	/* Ensure ready_count is zero-initialised before the assembly runs */
	WRITE_ONCE(*nc_core_ready_count, 0);
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}

static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
				  struct uasm_reloc **pr,
				  const struct cache_desc *cache,
				  unsigned op, int lbl)
{
	unsigned cache_size = cache->ways << cache->waybit;
	unsigned i;
	const unsigned unroll_lines = 32;

	/* If the cache isn't present this function has it easy */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;

	/* Load base address */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Calculate end address */
	if (cache_size < 0x8000)
		uasm_i_addiu(pp, t1, t0, cache_size);
	else
		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

	/* Start of cache op loop */
	uasm_build_label(pl, *pp, lbl);

	/* Generate the cache ops */
	for (i = 0; i < unroll_lines; i++) {
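		/*
		 * MIPSr6 reduced the offset field of the cache instruction,
		 * so step the base address each iteration rather than
		 * encoding the line offset as an immediate.
		 */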
		if (cpu_has_mips_r6) {
			uasm_i_cache(pp, op, 0, t0);
			uasm_i_addiu(pp, t0, t0, cache->linesz);
		} else {
			uasm_i_cache(pp, op, i * cache->linesz, t0);
		}
	}

	if (!cpu_has_mips_r6)
		/* Update the base address */
		uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

	/* Loop if we haven't reached the end address yet */
	uasm_il_bne(pp, pr, t0, t1, lbl);
	uasm_i_nop(pp);
}

static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
			     struct uasm_reloc **pr,
			     const struct cpuinfo_mips *cpu_info,
			     int lbl)
{
	unsigned i, fsb_size = 8;
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	default:
		/* Assume that the CPU does not need this workaround */
		return 0;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Setup perf counter to count FSB full pipeline stalls */
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(pp, __SYNC_full);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}

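/*
 * Atomically set the top bit of the word at r_addr using an LL/SC loop,
 * retrying if the store-conditional fails. This releases any coupled VPEs
 * polling ready_count for the "coherence disabled" indication.
 */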
static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				struct uasm_reloc **pr,
				unsigned r_addr, int lbl)
{
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}

static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, __SYNC_mb);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);
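		/*
		 * The addiu above sits in the branch delay slot, leaving the
		 * post-increment ready_count value for this VPE in t1.
		 */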

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			if (cpu_has_mipsmt)
				uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			if (cpu_has_mipsmt) {
				/* Halt the VPE via C0 tchalt register */
				uasm_i_addiu(&p, t0, zero, TCHALT_H);
				uasm_i_mtc0(&p, t0, 2, 4);
			} else if (cpu_has_vp) {
				/* Halt the VP via the CPC VP_STOP register */
				unsigned int vpe_id;

				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
				uasm_i_sw(&p, t0, 0, t1);
			} else {
				BUG();
			}
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(&p, __SYNC_full);
	uasm_i_ehb(&p);

	if (mips_cm_revision() < CM_REV_CM3) {
		/*
		 * Disable all but self interventions. The load from COHCTL is
		 * defined by the interAptiv & proAptiv SUMs as ensuring that
		 * the operation resulting from the preceding store is
		 * complete.
		 */
		uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
		uasm_i_sw(&p, t0, 0, r_pcohctl);
		uasm_i_lw(&p, t0, 0, r_pcohctl);

		/* Barrier to ensure write to coherence control is complete */
		uasm_i_sync(&p, __SYNC_full);
		uasm_i_ehb(&p);
	}

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);
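	/*
	 * Reading COHCTL back ensures the preceding store disabling coherence
	 * has taken effect before execution continues.
	 */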

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Barrier to ensure write to CPC command is complete */
		uasm_i_sync(&p, __SYNC_full);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
				? CM_GCR_Cx_COHERENCE_COHDOMAINEN
				: CM3_GCR_Cx_COHERENCE_COHEN);

	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Barrier to ensure write to coherence control is complete */
	uasm_i_sync(&p, __SYNC_full);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, __SYNC_mb);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
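		/*
		 * The andi above sits in the branch delay slot, masking off
		 * the top 'continue' bit so that v0 returns the number of
		 * VPEs that were in the wait state when this VPE left it.
		 */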

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}

static int cps_pm_online_cpu(unsigned int cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_core(&cpu_data[cpu]);
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}

	if (!per_cpu(ready_count, core)) {
		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed allocate core %u ready_count\n", core);
			return -ENOMEM;
		}
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}

static int cps_pm_power_notifier(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	unsigned int stat;

	switch (event) {
	case PM_SUSPEND_PREPARE:
		stat = read_cpc_cl_stat_conf();
		/*
		 * If we're attempting to suspend the system and power down all
		 * of the cores, the JTAG detect bit indicates that the CPC will
		 * instead put the cores into clock-off state. In this state
		 * a connected debugger can cause the CPU to attempt
		 * interactions with the powered down system. At best this will
		 * fail. At worst, it can hang the NoC, requiring a hard reset.
		 * To avoid this, just block system suspend if a JTAG probe
		 * is detected.
		 */
		if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) {
			pr_warn("JTAG probe is connected - abort suspend\n");
			return NOTIFY_BAD;
		}
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static int __init cps_pm_init(void)
{
	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		return 0;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	pm_notifier(cps_pm_power_notifier, 0);

	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
				 cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);