xref: /openbmc/linux/arch/mips/kernel/idle.c (revision 1fe3a33b)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MIPS idle loop and WAIT instruction support.
 *
 * Copyright (C) xxxx  the Anonymous
 * Copyright (C) 1994 - 2006 Ralf Baechle
 * Copyright (C) 2003, 2004  Maciej W. Rozycki
 * Copyright (C) 2001, 2004, 2011, 2012	 MIPS Technologies, Inc.
 */
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>

/*
 * Not all MIPS CPUs implement the "wait" instruction, and among those that
 * do, its behaviour differs between CPU families.  This pointer selects the
 * CPU-specific wait implementation.  The wait instruction stalls the
 * pipeline and significantly reduces the CPU's power consumption.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
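
/*
 * check_wait() below fills this in during boot; platform code may also
 * override or clear it.  A minimal, purely illustrative sketch (the function
 * name is hypothetical) of a board fixup that disables WAIT:
 *
 *	static int __init board_disable_wait(void)
 *	{
 *		cpu_wait = NULL;	// idle falls back to plain irq-enable
 *		return 0;
 *	}
 *	arch_initcall(board_disable_wait);
 */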
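/*
 * Setting the Halt bit in the R30xx config register stops the pipeline until
 * the next interrupt, which is why interrupts are enabled immediately
 * afterwards.
 */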
static void __cpuidle r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();
	write_c0_conf(cfg | R30XX_CONF_HALT);
	raw_local_irq_enable();
}

static void __cpuidle r39xx_wait(void)
{
	if (!need_resched())
		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
	raw_local_irq_enable();
}

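/*
 * r4k_wait() deliberately enables interrupts before the WAIT: __r4k_wait is
 * an assembly helper (see genex.S) placed in a known "rollback" region, and
 * the interrupt entry code restarts it from the top when EPC points into
 * that region, so a wakeup arriving between the irq-enable and the WAIT is
 * not lost.
 */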
void __cpuidle r4k_wait(void)
{
	raw_local_irq_enable();
	__r4k_wait();
}

/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically.  Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
void __cpuidle r4k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	raw_local_irq_enable();
}

/*
 * The RM7000 variant has to handle erratum 38.  The workaround is to have no
 * pending stores when the WAIT instruction is executed: the SYNC drains the
 * write buffer, and the MTC0 to c0_status stalls until the write-back stage
 * so the drain has completed before WAIT runs.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	raw_local_irq_enable();
}

/*
 * Au1 'wait' is only useful when the 32kHz counter is used as the timer,
 * since the core clock (and the cp0 counter) stops upon executing it.  Only
 * an interrupt can wake it, so interrupts must be enabled before entering
 * idle modes.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

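	/*
	 * Cache op 0x14 is an I-cache Fill: the two CACHE ops below prefetch
	 * this wait sequence into the I-cache, presumably so that no
	 * instruction fetch from memory is needed once the core clock stops.
	 */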
	__asm__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	mtc0	%1, $12			\n" /* wr c0status */
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	pop			\n"
	: : "r" (au1k_wait), "r" (c0status));
}

static int __initdata nowait;

static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);
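
/*
 * Booting with "nowait" on the kernel command line (e.g. appended after the
 * usual parameters: "console=ttyS0,115200 nowait") makes check_wait() bail
 * out early, leaving cpu_wait unset so the idle loop falls back to merely
 * re-enabling interrupts.
 */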

void __init check_wait(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	if (nowait) {
		printk("Wait instruction disabled.\n");
		return;
	}

	/*
	 * MIPSr6 specifies that masked interrupts should unblock an executing
	 * wait instruction, and thus that it is safe for us to use
	 * r4k_wait_irqoff. Yippee!
	 */
	if (cpu_has_mips_r6) {
		cpu_wait = r4k_wait_irqoff;
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R3081:
	case CPU_R3081E:
		cpu_wait = r3081_wait;
		break;
	case CPU_TX3927:
		cpu_wait = r39xx_wait;
		break;
	case CPU_R4200:
/*	case CPU_R4300: */
	case CPU_R4600:
	case CPU_R4640:
	case CPU_R4650:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5500:
	case CPU_NEVADA:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_4KSC:
	case CPU_5KC:
	case CPU_5KE:
	case CPU_25KF:
	case CPU_PR4450:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
	case CPU_XBURST:
	case CPU_LOONGSON32:
		cpu_wait = r4k_wait;
		break;
	case CPU_LOONGSON64:
		if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
				(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
				(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
			cpu_wait = r4k_wait;
		break;

	case CPU_BMIPS5000:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_RM7000:
		cpu_wait = rm7k_wait_irqoff;
		break;

	case CPU_PROAPTIV:
	case CPU_P5600:
		/*
		 * Incoming Fast Debug Channel (FDC) data during a wait
		 * instruction causes the wait never to resume, even if an
		 * interrupt is received. Avoid using wait at all if FDC data is
		 * likely to be received.
		 */
		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
			break;
		fallthrough;
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		cpu_wait = r4k_wait;
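		/*
		 * Config7.WII ("Wait IE Ignore") set indicates that WAIT also
		 * resumes on an interrupt that is pending but not enabled, so
		 * the IRQ-off variant is safe on such cores.
		 */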
		if (read_c0_config7() & MIPS_CONF7_WII)
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_74K:
		cpu_wait = r4k_wait;
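		/*
		 * Core revisions 2.1.0 and later are treated as safe for the
		 * IRQ-off variant, presumably because they resume from WAIT
		 * even when the pending interrupt is not enabled.
		 */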
		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_TX49XX:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_ALCHEMY:
		cpu_wait = au1k_wait;
		break;
	case CPU_20KC:
		/*
		 * WAIT on Rev1.0 is affected by errata E1, E2, E3 and E16.
		 * WAIT on Rev2.0 and Rev3.0 is affected by E16.
		 * On Rev3.1 WAIT is a nop, so there is no point in using it.
		 */
		if ((c->processor_id & 0xff) <= 0x64)
			break;

		/*
		 * Another revision increments c0_count at a reduced clock
		 * rate while in WAIT mode, so we basically have the choice
		 * between using the cp0 timer as a clocksource or avoiding
		 * the WAIT instruction.  Until more details are known,
		 * disable the use of WAIT for the 20Kc entirely.
		   cpu_wait = r4k_wait;
		 */
		break;
	default:
		break;
	}
}

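/*
 * The generic idle loop calls this with interrupts disabled and expects them
 * to be enabled again on return.  Each cpu_wait implementation above
 * re-enables interrupts itself; without one we simply re-enable them here.
 */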
void arch_cpu_idle(void)
{
	if (cpu_wait)
		cpu_wait();
	else
		raw_local_irq_enable();
}

#ifdef CONFIG_CPU_IDLE

int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}
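
/*
 * A cpuidle driver can use this as the enter hook of its shallowest state.
 * Purely illustrative sketch (field values and the variable name are made
 * up, not taken from any particular driver):
 *
 *	static struct cpuidle_state wait_state = {
 *		.enter		  = mips_cpuidle_wait_enter,
 *		.exit_latency	  = 1,
 *		.target_residency = 1,
 *		.name		  = "wait",
 *		.desc		  = "MIPS wait",
 *	};
 */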

#endif