// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MIPS idle loop and WAIT instruction support.
 *
 * Copyright (C) xxxx the Anonymous
 * Copyright (C) 1994 - 2006 Ralf Baechle
 * Copyright (C) 2003, 2004 Maciej W. Rozycki
 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
 */
1091955e3eSPaul Burton #include <linux/cpu.h>
1149f2ec91SRalf Baechle #include <linux/export.h>
1249f2ec91SRalf Baechle #include <linux/init.h>
1349f2ec91SRalf Baechle #include <linux/irqflags.h>
1449f2ec91SRalf Baechle #include <linux/printk.h>
1549f2ec91SRalf Baechle #include <linux/sched.h>
1649f2ec91SRalf Baechle #include <asm/cpu.h>
1749f2ec91SRalf Baechle #include <asm/cpu-info.h>
1869f24d17SRalf Baechle #include <asm/cpu-type.h>
19bdc92d74SRalf Baechle #include <asm/idle.h>
2049f2ec91SRalf Baechle #include <asm/mipsregs.h>
2149f2ec91SRalf Baechle
/*
 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
 * the implementation of the "wait" feature differs between CPU families. This
 * points to the function that implements CPU specific wait.
 * The wait instruction stops the pipeline and reduces the power consumption of
 * the CPU very much.
 *
 * Selected at boot by check_wait(); left NULL when no usable variant exists,
 * in which case arch_cpu_idle() simply does nothing.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
3149f2ec91SRalf Baechle
r3081_wait(void)3297c8580eSPaul Burton static void __cpuidle r3081_wait(void)
3349f2ec91SRalf Baechle {
3449f2ec91SRalf Baechle unsigned long cfg = read_c0_conf();
3549f2ec91SRalf Baechle write_c0_conf(cfg | R30XX_CONF_HALT);
3649f2ec91SRalf Baechle }
3749f2ec91SRalf Baechle
/*
 * Generic R4000-class idle: interrupts must be enabled around the WAIT
 * so a pending interrupt can restart the pipeline; they are re-disabled
 * afterwards because the cpuidle core expects to regain control with
 * irqs off.  Do not reorder these three calls.
 */
void __cpuidle r4k_wait(void)
{
	raw_local_irq_enable();
	__r4k_wait();
	raw_local_irq_disable();
}
44087d990bSRalf Baechle
/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically. Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
void __cpuidle r4k_wait_irqoff(void)
{
	/* Skip the WAIT entirely if a reschedule is already pending. */
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
}
6149f2ec91SRalf Baechle
/*
 * The RM7000 variant has to handle erratum 38. The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		/* Read c0_status into $at, drain the write buffer, then
		 * write it back: the mtc0 stalls until the W stage, which
		 * guarantees no store is pending when WAIT executes. */
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
}
8049f2ec91SRalf Baechle
/*
 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
 * since coreclock (and the cp0 counter) stops upon executing it. Only an
 * interrupt can wake it, so they must be enabled before entering idle modes.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"
	/* cache 0x14 on this and the next line of au1k_wait itself —
	 * presumably prefetches this code sequence into the I-cache so
	 * no fetch miss occurs around WAIT (TODO: confirm op 0x14
	 * semantics against the Au1 manual). */
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	mtc0	%1, $12			\n" /* wr c0status */
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	pop			\n"
	: : "r" (au1k_wait), "r" (c0status));

	/* Return to the caller with irqs off, as the idle core expects. */
	raw_local_irq_disable();
}
10749f2ec91SRalf Baechle
/* Set by the "nowait" kernel parameter to veto WAIT usage entirely. */
static int __initdata nowait;

/*
 * Early-param handler for "nowait"; the argument string is unused.
 * Returns 1 to signal the option was consumed.
 */
static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);
11849f2ec91SRalf Baechle
check_wait(void)11949f2ec91SRalf Baechle void __init check_wait(void)
12049f2ec91SRalf Baechle {
12149f2ec91SRalf Baechle struct cpuinfo_mips *c = ¤t_cpu_data;
12249f2ec91SRalf Baechle
12349f2ec91SRalf Baechle if (nowait) {
12449f2ec91SRalf Baechle printk("Wait instruction disabled.\n");
12549f2ec91SRalf Baechle return;
12649f2ec91SRalf Baechle }
12749f2ec91SRalf Baechle
1285b10a0e8SPaul Burton /*
1295b10a0e8SPaul Burton * MIPSr6 specifies that masked interrupts should unblock an executing
1305b10a0e8SPaul Burton * wait instruction, and thus that it is safe for us to use
1315b10a0e8SPaul Burton * r4k_wait_irqoff. Yippee!
1325b10a0e8SPaul Burton */
1335b10a0e8SPaul Burton if (cpu_has_mips_r6) {
1345b10a0e8SPaul Burton cpu_wait = r4k_wait_irqoff;
1355b10a0e8SPaul Burton return;
1365b10a0e8SPaul Burton }
1375b10a0e8SPaul Burton
13869f24d17SRalf Baechle switch (current_cpu_type()) {
13949f2ec91SRalf Baechle case CPU_R3081:
14049f2ec91SRalf Baechle case CPU_R3081E:
14149f2ec91SRalf Baechle cpu_wait = r3081_wait;
14249f2ec91SRalf Baechle break;
14349f2ec91SRalf Baechle case CPU_R4200:
14465ce6197SLauri Kasanen /* case CPU_R4300: */
14549f2ec91SRalf Baechle case CPU_R4600:
14649f2ec91SRalf Baechle case CPU_R4640:
14749f2ec91SRalf Baechle case CPU_R4650:
14849f2ec91SRalf Baechle case CPU_R4700:
14949f2ec91SRalf Baechle case CPU_R5000:
15049f2ec91SRalf Baechle case CPU_R5500:
15149f2ec91SRalf Baechle case CPU_NEVADA:
15249f2ec91SRalf Baechle case CPU_4KC:
15349f2ec91SRalf Baechle case CPU_4KEC:
15449f2ec91SRalf Baechle case CPU_4KSC:
15549f2ec91SRalf Baechle case CPU_5KC:
156bf463f2fSAurelien Jarno case CPU_5KE:
15749f2ec91SRalf Baechle case CPU_25KF:
15849f2ec91SRalf Baechle case CPU_PR4450:
15949f2ec91SRalf Baechle case CPU_BMIPS3300:
16049f2ec91SRalf Baechle case CPU_BMIPS4350:
16149f2ec91SRalf Baechle case CPU_BMIPS4380:
16249f2ec91SRalf Baechle case CPU_CAVIUM_OCTEON:
16349f2ec91SRalf Baechle case CPU_CAVIUM_OCTEON_PLUS:
16449f2ec91SRalf Baechle case CPU_CAVIUM_OCTEON2:
1654122af0aSDavid Daney case CPU_CAVIUM_OCTEON3:
1663b25b763SPaul Cercueil case CPU_XBURST:
167b2afb64cSHuacai Chen case CPU_LOONGSON32:
16849f2ec91SRalf Baechle cpu_wait = r4k_wait;
16949f2ec91SRalf Baechle break;
170268a2d60SJiaxun Yang case CPU_LOONGSON64:
1717507445bSHuacai Chen if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
1720cf2ea11SJiaxun Yang (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
1730cf2ea11SJiaxun Yang (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
174b2edcfc8SHuacai Chen cpu_wait = r4k_wait;
175b2edcfc8SHuacai Chen break;
176b2edcfc8SHuacai Chen
177adaa0b6cSPetri Gynther case CPU_BMIPS5000:
178adaa0b6cSPetri Gynther cpu_wait = r4k_wait_irqoff;
179adaa0b6cSPetri Gynther break;
18049f2ec91SRalf Baechle case CPU_RM7000:
18149f2ec91SRalf Baechle cpu_wait = rm7k_wait_irqoff;
18249f2ec91SRalf Baechle break;
18349f2ec91SRalf Baechle
184e38df288SJames Hogan case CPU_PROAPTIV:
185e38df288SJames Hogan case CPU_P5600:
186e38df288SJames Hogan /*
187e38df288SJames Hogan * Incoming Fast Debug Channel (FDC) data during a wait
188e38df288SJames Hogan * instruction causes the wait never to resume, even if an
189e38df288SJames Hogan * interrupt is received. Avoid using wait at all if FDC data is
190e38df288SJames Hogan * likely to be received.
191e38df288SJames Hogan */
192e38df288SJames Hogan if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
193e38df288SJames Hogan break;
194c9b02990SLiangliang Huang fallthrough;
19549f2ec91SRalf Baechle case CPU_M14KC:
19649f2ec91SRalf Baechle case CPU_M14KEC:
19749f2ec91SRalf Baechle case CPU_24K:
19849f2ec91SRalf Baechle case CPU_34K:
19949f2ec91SRalf Baechle case CPU_1004K:
200442e14a2SSteven J. Hill case CPU_1074K:
20126ab96dfSLeonid Yegoshin case CPU_INTERAPTIV:
202f36c4720SLeonid Yegoshin case CPU_M5150:
2034695089fSLeonid Yegoshin case CPU_QEMU_GENERIC:
20449f2ec91SRalf Baechle cpu_wait = r4k_wait;
20549f2ec91SRalf Baechle if (read_c0_config7() & MIPS_CONF7_WII)
20649f2ec91SRalf Baechle cpu_wait = r4k_wait_irqoff;
20749f2ec91SRalf Baechle break;
20849f2ec91SRalf Baechle
20949f2ec91SRalf Baechle case CPU_74K:
21049f2ec91SRalf Baechle cpu_wait = r4k_wait;
21149f2ec91SRalf Baechle if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
21249f2ec91SRalf Baechle cpu_wait = r4k_wait_irqoff;
21349f2ec91SRalf Baechle break;
21449f2ec91SRalf Baechle
21549f2ec91SRalf Baechle case CPU_TX49XX:
21649f2ec91SRalf Baechle cpu_wait = r4k_wait_irqoff;
21749f2ec91SRalf Baechle break;
21849f2ec91SRalf Baechle case CPU_ALCHEMY:
21949f2ec91SRalf Baechle cpu_wait = au1k_wait;
22049f2ec91SRalf Baechle break;
22149f2ec91SRalf Baechle case CPU_20KC:
22249f2ec91SRalf Baechle /*
22349f2ec91SRalf Baechle * WAIT on Rev1.0 has E1, E2, E3 and E16.
22449f2ec91SRalf Baechle * WAIT on Rev2.0 and Rev3.0 has E16.
22549f2ec91SRalf Baechle * Rev3.1 WAIT is nop, why bother
22649f2ec91SRalf Baechle */
22749f2ec91SRalf Baechle if ((c->processor_id & 0xff) <= 0x64)
22849f2ec91SRalf Baechle break;
22949f2ec91SRalf Baechle
23049f2ec91SRalf Baechle /*
23194bd83e4SJulia Lawall * Another rev is incrementing c0_count at a reduced clock
23249f2ec91SRalf Baechle * rate while in WAIT mode. So we basically have the choice
23349f2ec91SRalf Baechle * between using the cp0 timer as clocksource or avoiding
23449f2ec91SRalf Baechle * the WAIT instruction. Until more details are known,
23549f2ec91SRalf Baechle * disable the use of WAIT for 20Kc entirely.
23649f2ec91SRalf Baechle cpu_wait = r4k_wait;
23749f2ec91SRalf Baechle */
23849f2ec91SRalf Baechle break;
23949f2ec91SRalf Baechle default:
24049f2ec91SRalf Baechle break;
24149f2ec91SRalf Baechle }
24249f2ec91SRalf Baechle }
24349f2ec91SRalf Baechle
arch_cpu_idle(void)244*26388a7cSPeter Zijlstra __cpuidle void arch_cpu_idle(void)
24500baf857SRalf Baechle {
24649f2ec91SRalf Baechle if (cpu_wait)
247c9b6869dSRalf Baechle cpu_wait();
24849f2ec91SRalf Baechle }
249da9f970fSPaul Burton
250da9f970fSPaul Burton #ifdef CONFIG_CPU_IDLE
251da9f970fSPaul Burton
mips_cpuidle_wait_enter(struct cpuidle_device * dev,struct cpuidle_driver * drv,int index)252*26388a7cSPeter Zijlstra __cpuidle int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
253da9f970fSPaul Burton struct cpuidle_driver *drv, int index)
254da9f970fSPaul Burton {
255da9f970fSPaul Burton arch_cpu_idle();
256da9f970fSPaul Burton return index;
257da9f970fSPaul Burton }
258da9f970fSPaul Burton
259da9f970fSPaul Burton #endif
260