xref: /openbmc/linux/arch/mips/kernel/r4k-bugs64.c (revision d9f6e12f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2003, 2004, 2007  Maciej W. Rozycki
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>

#include <asm/bugs.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>

static char bug64hit[] __initdata =
	"reliable operation impossible!\n%s";
static char nowar[] __initdata =
	"Please report to <linux-mips@vger.kernel.org>.";
static char r4kwar[] __initdata =
	"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
	"Enable CPU_DADDI_WORKAROUNDS to rectify.";

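/*
 * Emit nops so that the code following this ends up `mod' instruction
 * slots past an `align'-byte boundary; used to place the test sequence
 * at every possible I-cache line offset.
 */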
static __always_inline __init
void align_mod(const int align, const int mod)
{
	asm volatile(
		".set	push\n\t"
		".set	noreorder\n\t"
		".balign %0\n\t"
		".rept	%1\n\t"
		"nop\n\t"
		".endr\n\t"
		".set	pop"
		:
		: "n"(align), "n"(mod));
}

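/*
 * Run one instance of the mult/dsll32 test sequence at the requested
 * code alignment.  *v1 receives the result of the raw (possibly
 * erratic) sequence, *v2 the result produced by compiler-generated
 * code and *w the expected value.
 */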
static __always_inline __init
void mult_sh_align_mod(long *v1, long *v2, long *w,
		       const int align, const int mod)
{
	unsigned long flags;
	int m1, m2;
	long p, s, lv1, lv2, lw;

	/*
	 * We want the multiply and the shift to be isolated from the
	 * rest of the code to disable gcc optimizations.  Hence the
	 * asm statements that execute nothing, but keep gcc from
	 * knowing what the values of m1, m2 and s are and what lv2
	 * and p are used for.
	 */

	local_irq_save(flags);
	/*
	 * The following code leads to a wrong result of the first
	 * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
	 * 00000422 or 00000430, respectively).
	 *
	 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
	 * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
	 * details.  I got no permission to duplicate them here,
	 * sigh... --macro
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (5), "1" (8), "2" (5));
	align_mod(align, mod);
	/*
	 * The trailing nop is needed to fulfill the two-instruction
	 * requirement between reading hi/lo and starting a mult/div.
	 * Leaving it out may cause gas to insert a nop itself,
	 * breaking the desired alignment of the next chunk.
	 */
	asm volatile(
		".set	push\n\t"
		".set	noat\n\t"
		".set	noreorder\n\t"
		".set	nomacro\n\t"
		"mult	%2, %3\n\t"
		"dsll32 %0, %4, %5\n\t"
		"mflo	$0\n\t"
		"dsll32 %1, %4, %5\n\t"
		"nop\n\t"
		".set	pop"
		: "=&r" (lv1), "=r" (lw)
		: "r" (m1), "r" (m2), "r" (s), "I" (0)
		: "hi", "lo", "$0");
	/*
	 * We have to use single integers for m1 and m2 and a double
	 * one for p to be sure gcc's mulsidi3 RTL multiplication
	 * pattern has the workaround applied.  Older versions of
	 * gcc have correct umulsi3 and mulsi3, but other
	 * multiplication variants lack the workaround.
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (m1), "1" (m2), "2" (s));
	align_mod(align, mod);
	p = m1 * m2;
	lv2 = s << 32;
	asm volatile(
		""
		: "=r" (lv2)
		: "0" (lv2), "r" (p));
	local_irq_restore(flags);

	*v1 = lv1;
	*v2 = lv2;
	*w = lw;
}

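/*
 * Probe for the R4000 rev. 2.2/3.0 multiply/shift erratum at every
 * possible I-cache line offset and, if it is found, check whether
 * compiler-generated code avoids it.
 */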
static __always_inline __init void check_mult_sh(void)
{
	long v1[8], v2[8], w[8];
	int bug, fix, i;

	printk("Checking for the multiply/shift bug... ");

	/*
	 * Testing discovered false negatives for certain code offsets
	 * into cache lines.  Hence we test all possible offsets for
	 * the worst assumption of an R4000 I-cache line width of 32
	 * bytes.
	 *
	 * We can't use a loop as alignment directives need to be
	 * immediates.
	 */
	mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
	mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
	mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
	mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
	mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
	mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
	mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
	mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);

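	/*
	 * v1[] holds the results of the raw test sequence and w[] the
	 * expected values; any mismatch means the erratum is present.
	 */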
	bug = 0;
	for (i = 0; i < 8; i++)
		if (v1[i] != w[i])
			bug = 1;

	if (bug == 0) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

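	/*
	 * v2[] was computed by compiler-generated code; if it matches
	 * the expected values, compiler-generated code avoids the
	 * erratum and the workaround is effective.
	 */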
	fix = 1;
	for (i = 0; i < 8; i++)
		if (v2[i] != w[i])
			fix = 0;

	if (fix == 1) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}

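/* Set by do_daddi_ov() when the overflow exception is taken during the test. */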
static volatile int daddi_ov;

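/*
 * Overflow exception handler installed while probing for the daddi
 * bug: record that the exception was taken and step over the
 * offending instruction.
 */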
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	daddi_ov = 1;
	regs->cp0_epc += 4;
	exception_exit(prev_state);
}

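/*
 * Check that daddi raises the overflow exception it is architecturally
 * required to: first with the raw instruction, then again in default
 * assembler mode so that a configured workaround can take effect.
 */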
static __init void check_daddi(void)
{
	extern asmlinkage void handle_daddi_ov(void);
	unsigned long flags;
	void *handler;
	long v, tmp;

	printk("Checking for the daddi bug... ");

	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
	/*
	 * The following code fails to trigger an overflow exception
	 * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
	 * 00000430, respectively).
	 *
	 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
	 * 3.0" by MIPS Technologies, Inc., erratum #23 for details.
	 * I got no permission to duplicate it here, sigh... --macro
	 */
	asm volatile(
		".set	push\n\t"
		".set	noat\n\t"
		".set	noreorder\n\t"
		".set	nomacro\n\t"
		"addiu	%1, $0, %2\n\t"
		"dsrl	%1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set	daddi\n\t"
#endif
		"daddi	%0, %1, %3\n\t"
		".set	pop"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
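	/*
	 * Repeat the test in the assembler's default (macro) mode, so
	 * that whatever DADDI workaround the toolchain provides can
	 * take effect; the overflow exception should fire this time.
	 */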
	asm volatile(
		"addiu	%1, $0, %2\n\t"
		"dsrl	%1, %1, 1\n\t"
		"daddi	%0, %1, %3"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

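/*
 * Result of the daddiu probe: -1 until check_daddiu() has run, then
 * 0 (no bug) or 1 (erratum present).
 */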
int daddiu_bug	= -1;

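/*
 * Check that daddiu computes the correct result, using an equivalent
 * addiu/daddu sequence as the reference: first with the raw
 * instruction, then again in default assembler mode so that a
 * configured workaround can take effect.
 */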
static __init void check_daddiu(void)
{
	long v, w, tmp;

	printk("Checking for the daddiu bug... ");

	/*
	 * The following code leads to a wrong result of daddiu when
	 * executed on R4400 rev. 1.0 (PRId 00000440).
	 *
	 * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
	 * MIPS Technologies, Inc., erratum #7 for details.
	 *
	 * According to "MIPS R4000PC/SC Errata, Processor Revision
	 * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
	 * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
	 * 00000430, respectively), too.  Testing failed to trigger it
	 * so far.
	 *
	 * I got no permission to duplicate the errata here, sigh...
	 * --macro
	 */
	asm volatile(
		".set	push\n\t"
		".set	noat\n\t"
		".set	noreorder\n\t"
		".set	nomacro\n\t"
		"addiu	%2, $0, %3\n\t"
		"dsrl	%2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set	daddi\n\t"
#endif
		"daddiu %0, %2, %4\n\t"
		"addiu	%1, $0, %4\n\t"
		"daddu	%1, %2\n\t"
		".set	pop"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	daddiu_bug = v != w;

	if (!daddiu_bug) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

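	/*
	 * Repeat the test in the assembler's default (macro) mode, so
	 * that whatever DADDIU workaround the toolchain provides can
	 * take effect.
	 */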
	asm volatile(
		"addiu	%2, $0, %3\n\t"
		"dsrl	%2, %2, 1\n\t"
		"daddiu %0, %2, %4\n\t"
		"addiu	%1, $0, %4\n\t"
		"daddu	%1, %2"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	if (v == w) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

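/*
 * Bug checks run early in the boot: neither probe relies on taking
 * an exception.
 */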
void __init check_bugs64_early(void)
{
	check_mult_sh();
	check_daddiu();
}

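/*
 * The daddi probe takes an overflow exception and therefore runs only
 * in this later pass, once exceptions can be handled.
 */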
void __init check_bugs64(void)
{
	check_daddi();
}