xref: /openbmc/linux/arch/arm64/kernel/idle.c (revision b5df5b83)
1*b5df5b83SMark Rutland // SPDX-License-Identifier: GPL-2.0-only
2*b5df5b83SMark Rutland /*
3*b5df5b83SMark Rutland  * Low-level idle sequences
4*b5df5b83SMark Rutland  */
5*b5df5b83SMark Rutland 
6*b5df5b83SMark Rutland #include <linux/cpu.h>
7*b5df5b83SMark Rutland #include <linux/irqflags.h>
8*b5df5b83SMark Rutland 
9*b5df5b83SMark Rutland #include <asm/arch_gicv3.h>
10*b5df5b83SMark Rutland #include <asm/barrier.h>
11*b5df5b83SMark Rutland #include <asm/cpufeature.h>
12*b5df5b83SMark Rutland #include <asm/sysreg.h>
13*b5df5b83SMark Rutland 
/*
 * Enter the low-power wait-for-interrupt state.
 *
 * The dsb(sy) must precede the wfi: it ensures all prior memory accesses
 * (and any system register writes made visible by them) have completed
 * before the CPU suspends, so nothing is left pending across the idle
 * entry. noinstr: runs in a context where instrumentation is unsafe.
 */
static void noinstr __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}
19*b5df5b83SMark Rutland 
/*
 * Idle entry for CPUs using GIC priority masking (pseudo-NMI).
 *
 * Statement order is critical here:
 *  1. Mask IRQ/FIQ at the PSTATE (DAIF) level first, so that opening up
 *     the PMR below cannot cause an interrupt to actually be taken.
 *  2. Unmask the PMR so the GIC will signal interrupts to the core --
 *     WFI only wakes on a signalled interrupt, so leaving the PMR masked
 *     would prevent wake-up (see the cpu_do_idle() comment).
 *  3. Idle.
 *  4. Restore PMR, then DAIF, reversing the entry order so the original
 *     masking state is back in place before interrupts can be taken.
 */
static void noinstr __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	/* Save DAIF and mask IRQ (PSR_I_BIT) and FIQ (PSR_F_BIT). */
	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	/* Restore PMR first, then the saved DAIF state. */
	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}
40*b5df5b83SMark Rutland 
41*b5df5b83SMark Rutland /*
42*b5df5b83SMark Rutland  *	cpu_do_idle()
43*b5df5b83SMark Rutland  *
44*b5df5b83SMark Rutland  *	Idle the processor (wait for interrupt).
45*b5df5b83SMark Rutland  *
46*b5df5b83SMark Rutland  *	If the CPU supports priority masking we must do additional work to
47*b5df5b83SMark Rutland  *	ensure that interrupts are not masked at the PMR (because the core will
48*b5df5b83SMark Rutland  *	not wake up if we block the wake up signal in the interrupt controller).
49*b5df5b83SMark Rutland  */
50*b5df5b83SMark Rutland void noinstr cpu_do_idle(void)
51*b5df5b83SMark Rutland {
52*b5df5b83SMark Rutland 	if (system_uses_irq_prio_masking())
53*b5df5b83SMark Rutland 		__cpu_do_idle_irqprio();
54*b5df5b83SMark Rutland 	else
55*b5df5b83SMark Rutland 		__cpu_do_idle();
56*b5df5b83SMark Rutland }
57*b5df5b83SMark Rutland 
/*
 * This is our default idle handler.
 *
 * NOTE(review): presumably entered by the generic idle loop with IRQs
 * disabled -- the raw_local_irq_enable() below re-enables them after
 * the CPU wakes; confirm against the generic idle-loop caller.
 */
void noinstr arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	cpu_do_idle();
	/* Re-enable interrupts only after returning from idle. */
	raw_local_irq_enable();
}
70