xref: /openbmc/linux/arch/mips/kernel/unaligned.c (revision 26ba4e57)
1 /*
2  * Handle unaligned accesses by emulation.
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Copyright (C) 2014 Imagination Technologies Ltd.
11  *
12  * This file contains exception handler for address error exception with the
13  * special capability to execute faulting instructions in software.  The
14  * handler does not try to handle the case when the program counter points
15  * to an address not aligned to a word boundary.
16  *
17  * Putting data at unaligned addresses is bad practice even on Intel, where
18  * only performance is affected.  Much worse is that such code is non-
19  * portable.  Because several programs died on MIPS due to alignment
20  * problems, I decided to implement this handler anyway, though I originally
21  * didn't intend to do this at all for user code.
22  *
23  * For now I enable fixing of address errors by default to make life easier.
24  * However, I intend to disable this at some point in the future, once the
25  * alignment problems in user programs have been fixed.  For programmers this
26  * is the right way to go.
27  *
28  * Fixing address errors is a per-process option.  The option is inherited
29  * across fork(2) and execve(2) calls.  If you really want to use this
30  * option in your user programs - I strongly discourage relying on the
31  * software emulation - use the following code in your userland code:
32  *
33  * #include <sys/sysmips.h>
34  *
35  * ...
36  * sysmips(MIPS_FIXADE, x);
37  * ...
38  *
39  * An argument x of 0 disables software emulation; any non-zero value enables it.
40  *
41  * Below is a little program to play around with this feature.
42  *
43  * #include <stdio.h>
44  * #include <stdlib.h>
45  * #include <sys/sysmips.h>
46  * struct foo {
47  *	   unsigned char bar[8];
48  * };
49  *
50  * int main(int argc, char *argv[])
51  * {
52  *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53  *	   unsigned int *p = (unsigned int *) (x.bar + 3);
54  *	   int i;
55  *
56  *	   if (argc > 1)
57  *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
58  *
59  *	   printf("*p = %08x\n", *p);
60  *
61  *	   *p = 0xdeadface;
62  *
63  *	   for (i = 0; i <= 7; i++)
64  *		   printf("%02x ", x.bar[i]);
65  *	   printf("\n");
66  * }
67  *
68  * Coprocessor loads are not supported; I think this case is unimportant
69  * in practice.
70  *
71  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72  *	 exception for the R6000.
73  *	 A store crossing a page boundary might be executed only partially.
74  *	 Undo the partial store in this case.
75  */
76 #include <linux/context_tracking.h>
77 #include <linux/mm.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
83 
84 #include <asm/asm.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
87 #include <asm/cop2.h>
88 #include <asm/debug.h>
89 #include <asm/fpu.h>
90 #include <asm/fpu_emulator.h>
91 #include <asm/inst.h>
92 #include <asm/mmu_context.h>
93 #include <linux/uaccess.h>
94 
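/*
 * STR() stringifies the PTR assembler directive from <asm/asm.h> (".word" on
 * 32-bit, ".dword" on 64-bit) so it can be pasted into the __ex_table entries
 * emitted by the access macros below.
 */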
95 #define STR(x)	__STR(x)
96 #define __STR(x)  #x
97 
98 enum {
99 	UNALIGNED_ACTION_QUIET,
100 	UNALIGNED_ACTION_SIGNAL,
101 	UNALIGNED_ACTION_SHOW,
102 };
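/*
 * unaligned_instructions counts the accesses emulated so far; unaligned_action
 * selects whether an unaligned access is fixed up quietly, turned into a
 * signal, or fixed up with a register dump.  When CONFIG_DEBUG_FS is enabled,
 * both knobs are exposed through debugfs by the init code later in this file.
 */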
103 #ifdef CONFIG_DEBUG_FS
104 static u32 unaligned_instructions;
105 static u32 unaligned_action;
106 #else
107 #define unaligned_action UNALIGNED_ACTION_QUIET
108 #endif
109 extern void show_registers(struct pt_regs *regs);
110 
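/*
 * The _Load and _Store helper macros below all follow the same pattern: the
 * numbered labels mark the memory instructions that may fault, the .fixup
 * fragment loads -EFAULT into the "res" operand and jumps back past the
 * access, and the __ex_table entries direct the fault handler to that fixup.
 * On success "res" is 0 and, for loads, "value" holds the assembled data.
 * The "type" parameter selects the kernel or EVA user variant of each memory
 * instruction.
 */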
111 #ifdef __BIG_ENDIAN
112 #define     _LoadHW(addr, value, res, type)  \
113 do {                                                        \
114 		__asm__ __volatile__ (".set\tnoat\n"        \
115 			"1:\t"type##_lb("%0", "0(%2)")"\n"  \
116 			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
117 			"sll\t%0, 0x8\n\t"                  \
118 			"or\t%0, $1\n\t"                    \
119 			"li\t%1, 0\n"                       \
120 			"3:\t.set\tat\n\t"                  \
121 			".insn\n\t"                         \
122 			".section\t.fixup,\"ax\"\n\t"       \
123 			"4:\tli\t%1, %3\n\t"                \
124 			"j\t3b\n\t"                         \
125 			".previous\n\t"                     \
126 			".section\t__ex_table,\"a\"\n\t"    \
127 			STR(PTR)"\t1b, 4b\n\t"              \
128 			STR(PTR)"\t2b, 4b\n\t"              \
129 			".previous"                         \
130 			: "=&r" (value), "=r" (res)         \
131 			: "r" (addr), "i" (-EFAULT));       \
132 } while(0)
133 
134 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
135 #define     _LoadW(addr, value, res, type)   \
136 do {                                                        \
137 		__asm__ __volatile__ (                      \
138 			"1:\t"type##_lwl("%0", "(%2)")"\n"   \
139 			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
140 			"li\t%1, 0\n"                       \
141 			"3:\n\t"                            \
142 			".insn\n\t"                         \
143 			".section\t.fixup,\"ax\"\n\t"       \
144 			"4:\tli\t%1, %3\n\t"                \
145 			"j\t3b\n\t"                         \
146 			".previous\n\t"                     \
147 			".section\t__ex_table,\"a\"\n\t"    \
148 			STR(PTR)"\t1b, 4b\n\t"              \
149 			STR(PTR)"\t2b, 4b\n\t"              \
150 			".previous"                         \
151 			: "=&r" (value), "=r" (res)         \
152 			: "r" (addr), "i" (-EFAULT));       \
153 } while(0)
154 
155 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
156 /* For CPUs without lwl instruction */
157 #define     _LoadW(addr, value, res, type) \
158 do {                                                        \
159 		__asm__ __volatile__ (			    \
160 			".set\tpush\n"			    \
161 			".set\tnoat\n\t"		    \
162 			"1:"type##_lb("%0", "0(%2)")"\n\t"  \
163 			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
164 			"sll\t%0, 0x8\n\t"		    \
165 			"or\t%0, $1\n\t"		    \
166 			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
167 			"sll\t%0, 0x8\n\t"		    \
168 			"or\t%0, $1\n\t"		    \
169 			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
170 			"sll\t%0, 0x8\n\t"		    \
171 			"or\t%0, $1\n\t"		    \
172 			"li\t%1, 0\n"			    \
173 			".set\tpop\n"			    \
174 			"10:\n\t"			    \
175 			".insn\n\t"			    \
176 			".section\t.fixup,\"ax\"\n\t"	    \
177 			"11:\tli\t%1, %3\n\t"		    \
178 			"j\t10b\n\t"			    \
179 			".previous\n\t"			    \
180 			".section\t__ex_table,\"a\"\n\t"    \
181 			STR(PTR)"\t1b, 11b\n\t"		    \
182 			STR(PTR)"\t2b, 11b\n\t"		    \
183 			STR(PTR)"\t3b, 11b\n\t"		    \
184 			STR(PTR)"\t4b, 11b\n\t"		    \
185 			".previous"			    \
186 			: "=&r" (value), "=r" (res)	    \
187 			: "r" (addr), "i" (-EFAULT));       \
188 } while(0)
189 
190 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
191 
192 #define     _LoadHWU(addr, value, res, type) \
193 do {                                                        \
194 		__asm__ __volatile__ (                      \
195 			".set\tnoat\n"                      \
196 			"1:\t"type##_lbu("%0", "0(%2)")"\n" \
197 			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
198 			"sll\t%0, 0x8\n\t"                  \
199 			"or\t%0, $1\n\t"                    \
200 			"li\t%1, 0\n"                       \
201 			"3:\n\t"                            \
202 			".insn\n\t"                         \
203 			".set\tat\n\t"                      \
204 			".section\t.fixup,\"ax\"\n\t"       \
205 			"4:\tli\t%1, %3\n\t"                \
206 			"j\t3b\n\t"                         \
207 			".previous\n\t"                     \
208 			".section\t__ex_table,\"a\"\n\t"    \
209 			STR(PTR)"\t1b, 4b\n\t"              \
210 			STR(PTR)"\t2b, 4b\n\t"              \
211 			".previous"                         \
212 			: "=&r" (value), "=r" (res)         \
213 			: "r" (addr), "i" (-EFAULT));       \
214 } while(0)
215 
216 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
217 #define     _LoadWU(addr, value, res, type)  \
218 do {                                                        \
219 		__asm__ __volatile__ (                      \
220 			"1:\t"type##_lwl("%0", "(%2)")"\n"  \
221 			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
222 			"dsll\t%0, %0, 32\n\t"              \
223 			"dsrl\t%0, %0, 32\n\t"              \
224 			"li\t%1, 0\n"                       \
225 			"3:\n\t"                            \
226 			".insn\n\t"                         \
227 			"\t.section\t.fixup,\"ax\"\n\t"     \
228 			"4:\tli\t%1, %3\n\t"                \
229 			"j\t3b\n\t"                         \
230 			".previous\n\t"                     \
231 			".section\t__ex_table,\"a\"\n\t"    \
232 			STR(PTR)"\t1b, 4b\n\t"              \
233 			STR(PTR)"\t2b, 4b\n\t"              \
234 			".previous"                         \
235 			: "=&r" (value), "=r" (res)         \
236 			: "r" (addr), "i" (-EFAULT));       \
237 } while(0)
238 
239 #define     _LoadDW(addr, value, res)  \
240 do {                                                        \
241 		__asm__ __volatile__ (                      \
242 			"1:\tldl\t%0, (%2)\n"               \
243 			"2:\tldr\t%0, 7(%2)\n\t"            \
244 			"li\t%1, 0\n"                       \
245 			"3:\n\t"                            \
246 			".insn\n\t"                         \
247 			"\t.section\t.fixup,\"ax\"\n\t"     \
248 			"4:\tli\t%1, %3\n\t"                \
249 			"j\t3b\n\t"                         \
250 			".previous\n\t"                     \
251 			".section\t__ex_table,\"a\"\n\t"    \
252 			STR(PTR)"\t1b, 4b\n\t"              \
253 			STR(PTR)"\t2b, 4b\n\t"              \
254 			".previous"                         \
255 			: "=&r" (value), "=r" (res)         \
256 			: "r" (addr), "i" (-EFAULT));       \
257 } while(0)
258 
259 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
260 /* For CPUs without lwl and ldl instructions */
261 #define	    _LoadWU(addr, value, res, type) \
262 do {                                                        \
263 		__asm__ __volatile__ (			    \
264 			".set\tpush\n\t"		    \
265 			".set\tnoat\n\t"		    \
266 			"1:"type##_lbu("%0", "0(%2)")"\n\t" \
267 			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
268 			"sll\t%0, 0x8\n\t"		    \
269 			"or\t%0, $1\n\t"		    \
270 			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
271 			"sll\t%0, 0x8\n\t"		    \
272 			"or\t%0, $1\n\t"		    \
273 			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
274 			"sll\t%0, 0x8\n\t"		    \
275 			"or\t%0, $1\n\t"		    \
276 			"li\t%1, 0\n"			    \
277 			".set\tpop\n"			    \
278 			"10:\n\t"			    \
279 			".insn\n\t"			    \
280 			".section\t.fixup,\"ax\"\n\t"	    \
281 			"11:\tli\t%1, %3\n\t"		    \
282 			"j\t10b\n\t"			    \
283 			".previous\n\t"			    \
284 			".section\t__ex_table,\"a\"\n\t"    \
285 			STR(PTR)"\t1b, 11b\n\t"		    \
286 			STR(PTR)"\t2b, 11b\n\t"		    \
287 			STR(PTR)"\t3b, 11b\n\t"		    \
288 			STR(PTR)"\t4b, 11b\n\t"		    \
289 			".previous"			    \
290 			: "=&r" (value), "=r" (res)	    \
291 			: "r" (addr), "i" (-EFAULT));       \
292 } while(0)
293 
294 #define     _LoadDW(addr, value, res)  \
295 do {                                                        \
296 		__asm__ __volatile__ (			    \
297 			".set\tpush\n\t"		    \
298 			".set\tnoat\n\t"		    \
299 			"1:lb\t%0, 0(%2)\n\t"    	    \
300 			"2:lbu\t $1, 1(%2)\n\t"   	    \
301 			"dsll\t%0, 0x8\n\t"		    \
302 			"or\t%0, $1\n\t"		    \
303 			"3:lbu\t$1, 2(%2)\n\t"   	    \
304 			"dsll\t%0, 0x8\n\t"		    \
305 			"or\t%0, $1\n\t"		    \
306 			"4:lbu\t$1, 3(%2)\n\t"   	    \
307 			"dsll\t%0, 0x8\n\t"		    \
308 			"or\t%0, $1\n\t"		    \
309 			"5:lbu\t$1, 4(%2)\n\t"   	    \
310 			"dsll\t%0, 0x8\n\t"		    \
311 			"or\t%0, $1\n\t"		    \
312 			"6:lbu\t$1, 5(%2)\n\t"   	    \
313 			"dsll\t%0, 0x8\n\t"		    \
314 			"or\t%0, $1\n\t"		    \
315 			"7:lbu\t$1, 6(%2)\n\t"   	    \
316 			"dsll\t%0, 0x8\n\t"		    \
317 			"or\t%0, $1\n\t"		    \
318 			"8:lbu\t$1, 7(%2)\n\t"   	    \
319 			"dsll\t%0, 0x8\n\t"		    \
320 			"or\t%0, $1\n\t"		    \
321 			"li\t%1, 0\n"			    \
322 			".set\tpop\n\t"			    \
323 			"10:\n\t"			    \
324 			".insn\n\t"			    \
325 			".section\t.fixup,\"ax\"\n\t"	    \
326 			"11:\tli\t%1, %3\n\t"		    \
327 			"j\t10b\n\t"			    \
328 			".previous\n\t"			    \
329 			".section\t__ex_table,\"a\"\n\t"    \
330 			STR(PTR)"\t1b, 11b\n\t"		    \
331 			STR(PTR)"\t2b, 11b\n\t"		    \
332 			STR(PTR)"\t3b, 11b\n\t"		    \
333 			STR(PTR)"\t4b, 11b\n\t"		    \
334 			STR(PTR)"\t5b, 11b\n\t"		    \
335 			STR(PTR)"\t6b, 11b\n\t"		    \
336 			STR(PTR)"\t7b, 11b\n\t"		    \
337 			STR(PTR)"\t8b, 11b\n\t"		    \
338 			".previous"			    \
339 			: "=&r" (value), "=r" (res)	    \
340 			: "r" (addr), "i" (-EFAULT));       \
341 } while(0)
342 
343 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
344 
345 
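/*
 * The store helpers mirror the loads: "res" is 0 on success or -EFAULT if any
 * byte faulted.  A store that crosses a page boundary may already have been
 * partially performed when the fault is taken (see the TODO in the header
 * comment).
 */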
346 #define     _StoreHW(addr, value, res, type) \
347 do {                                                        \
348 		__asm__ __volatile__ (                      \
349 			".set\tnoat\n"                      \
350 			"1:\t"type##_sb("%1", "1(%2)")"\n"  \
351 			"srl\t$1, %1, 0x8\n"                \
352 			"2:\t"type##_sb("$1", "0(%2)")"\n"  \
353 			".set\tat\n\t"                      \
354 			"li\t%0, 0\n"                       \
355 			"3:\n\t"                            \
356 			".insn\n\t"                         \
357 			".section\t.fixup,\"ax\"\n\t"       \
358 			"4:\tli\t%0, %3\n\t"                \
359 			"j\t3b\n\t"                         \
360 			".previous\n\t"                     \
361 			".section\t__ex_table,\"a\"\n\t"    \
362 			STR(PTR)"\t1b, 4b\n\t"              \
363 			STR(PTR)"\t2b, 4b\n\t"              \
364 			".previous"                         \
365 			: "=r" (res)                        \
366 			: "r" (value), "r" (addr), "i" (-EFAULT));\
367 } while(0)
368 
369 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
370 #define     _StoreW(addr, value, res, type)  \
371 do {                                                        \
372 		__asm__ __volatile__ (                      \
373 			"1:\t"type##_swl("%1", "(%2)")"\n"  \
374 			"2:\t"type##_swr("%1", "3(%2)")"\n\t"\
375 			"li\t%0, 0\n"                       \
376 			"3:\n\t"                            \
377 			".insn\n\t"                         \
378 			".section\t.fixup,\"ax\"\n\t"       \
379 			"4:\tli\t%0, %3\n\t"                \
380 			"j\t3b\n\t"                         \
381 			".previous\n\t"                     \
382 			".section\t__ex_table,\"a\"\n\t"    \
383 			STR(PTR)"\t1b, 4b\n\t"              \
384 			STR(PTR)"\t2b, 4b\n\t"              \
385 			".previous"                         \
386 		: "=r" (res)                                \
387 		: "r" (value), "r" (addr), "i" (-EFAULT));  \
388 } while(0)
389 
390 #define     _StoreDW(addr, value, res) \
391 do {                                                        \
392 		__asm__ __volatile__ (                      \
393 			"1:\tsdl\t%1,(%2)\n"                \
394 			"2:\tsdr\t%1, 7(%2)\n\t"            \
395 			"li\t%0, 0\n"                       \
396 			"3:\n\t"                            \
397 			".insn\n\t"                         \
398 			".section\t.fixup,\"ax\"\n\t"       \
399 			"4:\tli\t%0, %3\n\t"                \
400 			"j\t3b\n\t"                         \
401 			".previous\n\t"                     \
402 			".section\t__ex_table,\"a\"\n\t"    \
403 			STR(PTR)"\t1b, 4b\n\t"              \
404 			STR(PTR)"\t2b, 4b\n\t"              \
405 			".previous"                         \
406 		: "=r" (res)                                \
407 		: "r" (value), "r" (addr), "i" (-EFAULT));  \
408 } while(0)
409 
410 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
411 #define     _StoreW(addr, value, res, type)  \
412 do {                                                        \
413 		__asm__ __volatile__ (                      \
414 			".set\tpush\n\t"		    \
415 			".set\tnoat\n\t"		    \
416 			"1:"type##_sb("%1", "3(%2)")"\n\t"  \
417 			"srl\t$1, %1, 0x8\n\t"		    \
418 			"2:"type##_sb("$1", "2(%2)")"\n\t"  \
419 			"srl\t$1, $1,  0x8\n\t"		    \
420 			"3:"type##_sb("$1", "1(%2)")"\n\t"  \
421 			"srl\t$1, $1, 0x8\n\t"		    \
422 			"4:"type##_sb("$1", "0(%2)")"\n\t"  \
423 			".set\tpop\n\t"			    \
424 			"li\t%0, 0\n"			    \
425 			"10:\n\t"			    \
426 			".insn\n\t"			    \
427 			".section\t.fixup,\"ax\"\n\t"	    \
428 			"11:\tli\t%0, %3\n\t"		    \
429 			"j\t10b\n\t"			    \
430 			".previous\n\t"			    \
431 			".section\t__ex_table,\"a\"\n\t"    \
432 			STR(PTR)"\t1b, 11b\n\t"		    \
433 			STR(PTR)"\t2b, 11b\n\t"		    \
434 			STR(PTR)"\t3b, 11b\n\t"		    \
435 			STR(PTR)"\t4b, 11b\n\t"		    \
436 			".previous"			    \
437 		: "=&r" (res)			    	    \
438 		: "r" (value), "r" (addr), "i" (-EFAULT)    \
439 		: "memory");                                \
440 } while(0)
441 
442 #define     _StoreDW(addr, value, res) \
443 do {                                                        \
444 		__asm__ __volatile__ (                      \
445 			".set\tpush\n\t"		    \
446 			".set\tnoat\n\t"		    \
447 			"1:sb\t%1, 7(%2)\n\t"    	    \
448 			"dsrl\t$1, %1, 0x8\n\t"		    \
449 			"2:sb\t$1, 6(%2)\n\t"    	    \
450 			"dsrl\t$1, $1, 0x8\n\t"		    \
451 			"3:sb\t$1, 5(%2)\n\t"    	    \
452 			"dsrl\t$1, $1, 0x8\n\t"		    \
453 			"4:sb\t$1, 4(%2)\n\t"    	    \
454 			"dsrl\t$1, $1, 0x8\n\t"		    \
455 			"5:sb\t$1, 3(%2)\n\t"    	    \
456 			"dsrl\t$1, $1, 0x8\n\t"		    \
457 			"6:sb\t$1, 2(%2)\n\t"    	    \
458 			"dsrl\t$1, $1, 0x8\n\t"		    \
459 			"7:sb\t$1, 1(%2)\n\t"    	    \
460 			"dsrl\t$1, $1, 0x8\n\t"		    \
461 			"8:sb\t$1, 0(%2)\n\t"    	    \
462 			"dsrl\t$1, $1, 0x8\n\t"		    \
463 			".set\tpop\n\t"			    \
464 			"li\t%0, 0\n"			    \
465 			"10:\n\t"			    \
466 			".insn\n\t"			    \
467 			".section\t.fixup,\"ax\"\n\t"	    \
468 			"11:\tli\t%0, %3\n\t"		    \
469 			"j\t10b\n\t"			    \
470 			".previous\n\t"			    \
471 			".section\t__ex_table,\"a\"\n\t"    \
472 			STR(PTR)"\t1b, 11b\n\t"		    \
473 			STR(PTR)"\t2b, 11b\n\t"		    \
474 			STR(PTR)"\t3b, 11b\n\t"		    \
475 			STR(PTR)"\t4b, 11b\n\t"		    \
476 			STR(PTR)"\t5b, 11b\n\t"		    \
477 			STR(PTR)"\t6b, 11b\n\t"		    \
478 			STR(PTR)"\t7b, 11b\n\t"		    \
479 			STR(PTR)"\t8b, 11b\n\t"		    \
480 			".previous"			    \
481 		: "=&r" (res)			    	    \
482 		: "r" (value), "r" (addr), "i" (-EFAULT)    \
483 		: "memory");                                \
484 } while(0)
485 
486 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
487 
488 #else /* __BIG_ENDIAN */
489 
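/*
 * Little-endian variants: structurally identical to the big-endian macros
 * above, only the byte offsets (and thus the order in which the bytes are
 * assembled) are reversed.
 */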
490 #define     _LoadHW(addr, value, res, type)  \
491 do {                                                        \
492 		__asm__ __volatile__ (".set\tnoat\n"        \
493 			"1:\t"type##_lb("%0", "1(%2)")"\n"  \
494 			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
495 			"sll\t%0, 0x8\n\t"                  \
496 			"or\t%0, $1\n\t"                    \
497 			"li\t%1, 0\n"                       \
498 			"3:\t.set\tat\n\t"                  \
499 			".insn\n\t"                         \
500 			".section\t.fixup,\"ax\"\n\t"       \
501 			"4:\tli\t%1, %3\n\t"                \
502 			"j\t3b\n\t"                         \
503 			".previous\n\t"                     \
504 			".section\t__ex_table,\"a\"\n\t"    \
505 			STR(PTR)"\t1b, 4b\n\t"              \
506 			STR(PTR)"\t2b, 4b\n\t"              \
507 			".previous"                         \
508 			: "=&r" (value), "=r" (res)         \
509 			: "r" (addr), "i" (-EFAULT));       \
510 } while(0)
511 
512 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
513 #define     _LoadW(addr, value, res, type)   \
514 do {                                                        \
515 		__asm__ __volatile__ (                      \
516 			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
517 			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
518 			"li\t%1, 0\n"                       \
519 			"3:\n\t"                            \
520 			".insn\n\t"                         \
521 			".section\t.fixup,\"ax\"\n\t"       \
522 			"4:\tli\t%1, %3\n\t"                \
523 			"j\t3b\n\t"                         \
524 			".previous\n\t"                     \
525 			".section\t__ex_table,\"a\"\n\t"    \
526 			STR(PTR)"\t1b, 4b\n\t"              \
527 			STR(PTR)"\t2b, 4b\n\t"              \
528 			".previous"                         \
529 			: "=&r" (value), "=r" (res)         \
530 			: "r" (addr), "i" (-EFAULT));       \
531 } while(0)
532 
533 #else  /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
534 /* For CPUs without lwl instruction */
535 #define     _LoadW(addr, value, res, type) \
536 do {                                                        \
537 		__asm__ __volatile__ (			    \
538 			".set\tpush\n"			    \
539 			".set\tnoat\n\t"		    \
540 			"1:"type##_lb("%0", "3(%2)")"\n\t"  \
541 			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
542 			"sll\t%0, 0x8\n\t"		    \
543 			"or\t%0, $1\n\t"		    \
544 			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
545 			"sll\t%0, 0x8\n\t"		    \
546 			"or\t%0, $1\n\t"		    \
547 			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
548 			"sll\t%0, 0x8\n\t"		    \
549 			"or\t%0, $1\n\t"		    \
550 			"li\t%1, 0\n"			    \
551 			".set\tpop\n"			    \
552 			"10:\n\t"			    \
553 			".insn\n\t"			    \
554 			".section\t.fixup,\"ax\"\n\t"	    \
555 			"11:\tli\t%1, %3\n\t"		    \
556 			"j\t10b\n\t"			    \
557 			".previous\n\t"			    \
558 			".section\t__ex_table,\"a\"\n\t"    \
559 			STR(PTR)"\t1b, 11b\n\t"		    \
560 			STR(PTR)"\t2b, 11b\n\t"		    \
561 			STR(PTR)"\t3b, 11b\n\t"		    \
562 			STR(PTR)"\t4b, 11b\n\t"		    \
563 			".previous"			    \
564 			: "=&r" (value), "=r" (res)	    \
565 			: "r" (addr), "i" (-EFAULT));       \
566 } while(0)
567 
568 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
569 
570 
571 #define     _LoadHWU(addr, value, res, type) \
572 do {                                                        \
573 		__asm__ __volatile__ (                      \
574 			".set\tnoat\n"                      \
575 			"1:\t"type##_lbu("%0", "1(%2)")"\n" \
576 			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
577 			"sll\t%0, 0x8\n\t"                  \
578 			"or\t%0, $1\n\t"                    \
579 			"li\t%1, 0\n"                       \
580 			"3:\n\t"                            \
581 			".insn\n\t"                         \
582 			".set\tat\n\t"                      \
583 			".section\t.fixup,\"ax\"\n\t"       \
584 			"4:\tli\t%1, %3\n\t"                \
585 			"j\t3b\n\t"                         \
586 			".previous\n\t"                     \
587 			".section\t__ex_table,\"a\"\n\t"    \
588 			STR(PTR)"\t1b, 4b\n\t"              \
589 			STR(PTR)"\t2b, 4b\n\t"              \
590 			".previous"                         \
591 			: "=&r" (value), "=r" (res)         \
592 			: "r" (addr), "i" (-EFAULT));       \
593 } while(0)
594 
595 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
596 #define     _LoadWU(addr, value, res, type)  \
597 do {                                                        \
598 		__asm__ __volatile__ (                      \
599 			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
600 			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
601 			"dsll\t%0, %0, 32\n\t"              \
602 			"dsrl\t%0, %0, 32\n\t"              \
603 			"li\t%1, 0\n"                       \
604 			"3:\n\t"                            \
605 			".insn\n\t"                         \
606 			"\t.section\t.fixup,\"ax\"\n\t"     \
607 			"4:\tli\t%1, %3\n\t"                \
608 			"j\t3b\n\t"                         \
609 			".previous\n\t"                     \
610 			".section\t__ex_table,\"a\"\n\t"    \
611 			STR(PTR)"\t1b, 4b\n\t"              \
612 			STR(PTR)"\t2b, 4b\n\t"              \
613 			".previous"                         \
614 			: "=&r" (value), "=r" (res)         \
615 			: "r" (addr), "i" (-EFAULT));       \
616 } while(0)
617 
618 #define     _LoadDW(addr, value, res)  \
619 do {                                                        \
620 		__asm__ __volatile__ (                      \
621 			"1:\tldl\t%0, 7(%2)\n"              \
622 			"2:\tldr\t%0, (%2)\n\t"             \
623 			"li\t%1, 0\n"                       \
624 			"3:\n\t"                            \
625 			".insn\n\t"                         \
626 			"\t.section\t.fixup,\"ax\"\n\t"     \
627 			"4:\tli\t%1, %3\n\t"                \
628 			"j\t3b\n\t"                         \
629 			".previous\n\t"                     \
630 			".section\t__ex_table,\"a\"\n\t"    \
631 			STR(PTR)"\t1b, 4b\n\t"              \
632 			STR(PTR)"\t2b, 4b\n\t"              \
633 			".previous"                         \
634 			: "=&r" (value), "=r" (res)         \
635 			: "r" (addr), "i" (-EFAULT));       \
636 } while(0)
637 
638 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
639 /* For CPUs without lwl and ldl instructions */
640 #define	    _LoadWU(addr, value, res, type) \
641 do {                                                        \
642 		__asm__ __volatile__ (			    \
643 			".set\tpush\n\t"		    \
644 			".set\tnoat\n\t"		    \
645 			"1:"type##_lbu("%0", "3(%2)")"\n\t" \
646 			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
647 			"sll\t%0, 0x8\n\t"		    \
648 			"or\t%0, $1\n\t"		    \
649 			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
650 			"sll\t%0, 0x8\n\t"		    \
651 			"or\t%0, $1\n\t"		    \
652 			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
653 			"sll\t%0, 0x8\n\t"		    \
654 			"or\t%0, $1\n\t"		    \
655 			"li\t%1, 0\n"			    \
656 			".set\tpop\n"			    \
657 			"10:\n\t"			    \
658 			".insn\n\t"			    \
659 			".section\t.fixup,\"ax\"\n\t"	    \
660 			"11:\tli\t%1, %3\n\t"		    \
661 			"j\t10b\n\t"			    \
662 			".previous\n\t"			    \
663 			".section\t__ex_table,\"a\"\n\t"    \
664 			STR(PTR)"\t1b, 11b\n\t"		    \
665 			STR(PTR)"\t2b, 11b\n\t"		    \
666 			STR(PTR)"\t3b, 11b\n\t"		    \
667 			STR(PTR)"\t4b, 11b\n\t"		    \
668 			".previous"			    \
669 			: "=&r" (value), "=r" (res)	    \
670 			: "r" (addr), "i" (-EFAULT));       \
671 } while(0)
672 
673 #define     _LoadDW(addr, value, res)  \
674 do {                                                        \
675 		__asm__ __volatile__ (			    \
676 			".set\tpush\n\t"		    \
677 			".set\tnoat\n\t"		    \
678 			"1:lb\t%0, 7(%2)\n\t"    	    \
679 			"2:lbu\t$1, 6(%2)\n\t"   	    \
680 			"dsll\t%0, 0x8\n\t"		    \
681 			"or\t%0, $1\n\t"		    \
682 			"3:lbu\t$1, 5(%2)\n\t"   	    \
683 			"dsll\t%0, 0x8\n\t"		    \
684 			"or\t%0, $1\n\t"		    \
685 			"4:lbu\t$1, 4(%2)\n\t"   	    \
686 			"dsll\t%0, 0x8\n\t"		    \
687 			"or\t%0, $1\n\t"		    \
688 			"5:lbu\t$1, 3(%2)\n\t"   	    \
689 			"dsll\t%0, 0x8\n\t"		    \
690 			"or\t%0, $1\n\t"		    \
691 			"6:lbu\t$1, 2(%2)\n\t"   	    \
692 			"dsll\t%0, 0x8\n\t"		    \
693 			"or\t%0, $1\n\t"		    \
694 			"7:lbu\t$1, 1(%2)\n\t"   	    \
695 			"dsll\t%0, 0x8\n\t"		    \
696 			"or\t%0, $1\n\t"		    \
697 			"8:lbu\t$1, 0(%2)\n\t"   	    \
698 			"dsll\t%0, 0x8\n\t"		    \
699 			"or\t%0, $1\n\t"		    \
700 			"li\t%1, 0\n"			    \
701 			".set\tpop\n\t"			    \
702 			"10:\n\t"			    \
703 			".insn\n\t"			    \
704 			".section\t.fixup,\"ax\"\n\t"	    \
705 			"11:\tli\t%1, %3\n\t"		    \
706 			"j\t10b\n\t"			    \
707 			".previous\n\t"			    \
708 			".section\t__ex_table,\"a\"\n\t"    \
709 			STR(PTR)"\t1b, 11b\n\t"		    \
710 			STR(PTR)"\t2b, 11b\n\t"		    \
711 			STR(PTR)"\t3b, 11b\n\t"		    \
712 			STR(PTR)"\t4b, 11b\n\t"		    \
713 			STR(PTR)"\t5b, 11b\n\t"		    \
714 			STR(PTR)"\t6b, 11b\n\t"		    \
715 			STR(PTR)"\t7b, 11b\n\t"		    \
716 			STR(PTR)"\t8b, 11b\n\t"		    \
717 			".previous"			    \
718 			: "=&r" (value), "=r" (res)	    \
719 			: "r" (addr), "i" (-EFAULT));       \
720 } while(0)
721 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
722 
723 #define     _StoreHW(addr, value, res, type) \
724 do {                                                        \
725 		__asm__ __volatile__ (                      \
726 			".set\tnoat\n"                      \
727 			"1:\t"type##_sb("%1", "0(%2)")"\n"  \
728 			"srl\t$1,%1, 0x8\n"                 \
729 			"2:\t"type##_sb("$1", "1(%2)")"\n"  \
730 			".set\tat\n\t"                      \
731 			"li\t%0, 0\n"                       \
732 			"3:\n\t"                            \
733 			".insn\n\t"                         \
734 			".section\t.fixup,\"ax\"\n\t"       \
735 			"4:\tli\t%0, %3\n\t"                \
736 			"j\t3b\n\t"                         \
737 			".previous\n\t"                     \
738 			".section\t__ex_table,\"a\"\n\t"    \
739 			STR(PTR)"\t1b, 4b\n\t"              \
740 			STR(PTR)"\t2b, 4b\n\t"              \
741 			".previous"                         \
742 			: "=r" (res)                        \
743 			: "r" (value), "r" (addr), "i" (-EFAULT));\
744 } while(0)
745 
746 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
747 #define     _StoreW(addr, value, res, type)  \
748 do {                                                        \
749 		__asm__ __volatile__ (                      \
750 			"1:\t"type##_swl("%1", "3(%2)")"\n" \
751 			"2:\t"type##_swr("%1", "(%2)")"\n\t"\
752 			"li\t%0, 0\n"                       \
753 			"3:\n\t"                            \
754 			".insn\n\t"                         \
755 			".section\t.fixup,\"ax\"\n\t"       \
756 			"4:\tli\t%0, %3\n\t"                \
757 			"j\t3b\n\t"                         \
758 			".previous\n\t"                     \
759 			".section\t__ex_table,\"a\"\n\t"    \
760 			STR(PTR)"\t1b, 4b\n\t"              \
761 			STR(PTR)"\t2b, 4b\n\t"              \
762 			".previous"                         \
763 		: "=r" (res)                                \
764 		: "r" (value), "r" (addr), "i" (-EFAULT));  \
765 } while(0)
766 
767 #define     _StoreDW(addr, value, res) \
768 do {                                                        \
769 		__asm__ __volatile__ (                      \
770 			"1:\tsdl\t%1, 7(%2)\n"              \
771 			"2:\tsdr\t%1, (%2)\n\t"             \
772 			"li\t%0, 0\n"                       \
773 			"3:\n\t"                            \
774 			".insn\n\t"                         \
775 			".section\t.fixup,\"ax\"\n\t"       \
776 			"4:\tli\t%0, %3\n\t"                \
777 			"j\t3b\n\t"                         \
778 			".previous\n\t"                     \
779 			".section\t__ex_table,\"a\"\n\t"    \
780 			STR(PTR)"\t1b, 4b\n\t"              \
781 			STR(PTR)"\t2b, 4b\n\t"              \
782 			".previous"                         \
783 		: "=r" (res)                                \
784 		: "r" (value), "r" (addr), "i" (-EFAULT));  \
785 } while(0)
786 
787 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
788 /* For CPUs without swl and sdl instructions */
789 #define     _StoreW(addr, value, res, type)  \
790 do {                                                        \
791 		__asm__ __volatile__ (                      \
792 			".set\tpush\n\t"		    \
793 			".set\tnoat\n\t"		    \
794 			"1:"type##_sb("%1", "0(%2)")"\n\t"  \
795 			"srl\t$1, %1, 0x8\n\t"		    \
796 			"2:"type##_sb("$1", "1(%2)")"\n\t"  \
797 			"srl\t$1, $1,  0x8\n\t"		    \
798 			"3:"type##_sb("$1", "2(%2)")"\n\t"  \
799 			"srl\t$1, $1, 0x8\n\t"		    \
800 			"4:"type##_sb("$1", "3(%2)")"\n\t"  \
801 			".set\tpop\n\t"			    \
802 			"li\t%0, 0\n"			    \
803 			"10:\n\t"			    \
804 			".insn\n\t"			    \
805 			".section\t.fixup,\"ax\"\n\t"	    \
806 			"11:\tli\t%0, %3\n\t"		    \
807 			"j\t10b\n\t"			    \
808 			".previous\n\t"			    \
809 			".section\t__ex_table,\"a\"\n\t"    \
810 			STR(PTR)"\t1b, 11b\n\t"		    \
811 			STR(PTR)"\t2b, 11b\n\t"		    \
812 			STR(PTR)"\t3b, 11b\n\t"		    \
813 			STR(PTR)"\t4b, 11b\n\t"		    \
814 			".previous"			    \
815 		: "=&r" (res)			    	    \
816 		: "r" (value), "r" (addr), "i" (-EFAULT)    \
817 		: "memory");                                \
818 } while(0)
819 
820 #define     _StoreDW(addr, value, res) \
821 do {                                                        \
822 		__asm__ __volatile__ (                      \
823 			".set\tpush\n\t"		    \
824 			".set\tnoat\n\t"		    \
825 			"1:sb\t%1, 0(%2)\n\t"    	    \
826 			"dsrl\t$1, %1, 0x8\n\t"		    \
827 			"2:sb\t$1, 1(%2)\n\t"    	    \
828 			"dsrl\t$1, $1, 0x8\n\t"		    \
829 			"3:sb\t$1, 2(%2)\n\t"    	    \
830 			"dsrl\t$1, $1, 0x8\n\t"		    \
831 			"4:sb\t$1, 3(%2)\n\t"    	    \
832 			"dsrl\t$1, $1, 0x8\n\t"		    \
833 			"5:sb\t$1, 4(%2)\n\t"    	    \
834 			"dsrl\t$1, $1, 0x8\n\t"		    \
835 			"6:sb\t$1, 5(%2)\n\t"    	    \
836 			"dsrl\t$1, $1, 0x8\n\t"		    \
837 			"7:sb\t$1, 6(%2)\n\t"    	    \
838 			"dsrl\t$1, $1, 0x8\n\t"		    \
839 			"8:sb\t$1, 7(%2)\n\t"    	    \
840 			"dsrl\t$1, $1, 0x8\n\t"		    \
841 			".set\tpop\n\t"			    \
842 			"li\t%0, 0\n"			    \
843 			"10:\n\t"			    \
844 			".insn\n\t"			    \
845 			".section\t.fixup,\"ax\"\n\t"	    \
846 			"11:\tli\t%0, %3\n\t"		    \
847 			"j\t10b\n\t"			    \
848 			".previous\n\t"			    \
849 			".section\t__ex_table,\"a\"\n\t"    \
850 			STR(PTR)"\t1b, 11b\n\t"		    \
851 			STR(PTR)"\t2b, 11b\n\t"		    \
852 			STR(PTR)"\t3b, 11b\n\t"		    \
853 			STR(PTR)"\t4b, 11b\n\t"		    \
854 			STR(PTR)"\t5b, 11b\n\t"		    \
855 			STR(PTR)"\t6b, 11b\n\t"		    \
856 			STR(PTR)"\t7b, 11b\n\t"		    \
857 			STR(PTR)"\t8b, 11b\n\t"		    \
858 			".previous"			    \
859 		: "=&r" (res)			    	    \
860 		: "r" (value), "r" (addr), "i" (-EFAULT)    \
861 		: "memory");                                \
862 } while(0)
863 
864 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
865 #endif
866 
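/*
 * Convenience wrappers: the plain forms access kernel address space, the *E
 * forms use the EVA user variants for kernel code touching user memory.
 * Typical usage:
 *
 *	LoadW(addr, value, res);
 *	if (res)
 *		goto fault;
 */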
867 #define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
868 #define LoadHWUE(addr, value, res)	_LoadHWU(addr, value, res, user)
869 #define LoadWU(addr, value, res)	_LoadWU(addr, value, res, kernel)
870 #define LoadWUE(addr, value, res)	_LoadWU(addr, value, res, user)
871 #define LoadHW(addr, value, res)	_LoadHW(addr, value, res, kernel)
872 #define LoadHWE(addr, value, res)	_LoadHW(addr, value, res, user)
873 #define LoadW(addr, value, res)		_LoadW(addr, value, res, kernel)
874 #define LoadWE(addr, value, res)	_LoadW(addr, value, res, user)
875 #define LoadDW(addr, value, res)	_LoadDW(addr, value, res)
876 
877 #define StoreHW(addr, value, res)	_StoreHW(addr, value, res, kernel)
878 #define StoreHWE(addr, value, res)	_StoreHW(addr, value, res, user)
879 #define StoreW(addr, value, res)	_StoreW(addr, value, res, kernel)
880 #define StoreWE(addr, value, res)	_StoreW(addr, value, res, user)
881 #define StoreDW(addr, value, res)	_StoreDW(addr, value, res)
882 
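/*
 * Emulate a classic (non-microMIPS) load/store that raised an address error.
 * The faulting instruction is fetched from *pc, checked against the allowed
 * opcodes and emulated; FP and MSA accesses are handed to the respective
 * emulators.  If the emulated access itself faults, the original EPC and $31
 * are restored before a signal is delivered.
 */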
883 static void emulate_load_store_insn(struct pt_regs *regs,
884 	void __user *addr, unsigned int __user *pc)
885 {
886 	unsigned long origpc, orig31, value;
887 	union mips_instruction insn;
888 	unsigned int res;
889 #ifdef	CONFIG_EVA
890 	mm_segment_t seg;
891 #endif
892 	origpc = (unsigned long)pc;
893 	orig31 = regs->regs[31];
894 
895 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
896 
897 	/*
898 	 * This load never faults.
899 	 */
900 	__get_user(insn.word, pc);
901 
902 	switch (insn.i_format.opcode) {
903 		/*
906 		 * really buggy.  Emulating these instructions would break the
907 		 * semantics anyway.
908 		 */
909 	case ll_op:
910 	case lld_op:
911 	case sc_op:
912 	case scd_op:
913 
914 		/*
915 		 * For these instructions the only way to create an address
916 		 * error is an attempted access to kernel/supervisor address
917 		 * space.
918 		 */
919 	case ldl_op:
920 	case ldr_op:
921 	case lwl_op:
922 	case lwr_op:
923 	case sdl_op:
924 	case sdr_op:
925 	case swl_op:
926 	case swr_op:
927 	case lb_op:
928 	case lbu_op:
929 	case sb_op:
930 		goto sigbus;
931 
932 		/*
933 		 * The remaining opcodes are the ones that are really of
934 		 * interest.
935 		 */
936 	case spec3_op:
937 		if (insn.dsp_format.func == lx_op) {
938 			switch (insn.dsp_format.op) {
939 			case lwx_op:
940 				if (!access_ok(addr, 4))
941 					goto sigbus;
942 				LoadW(addr, value, res);
943 				if (res)
944 					goto fault;
945 				compute_return_epc(regs);
946 				regs->regs[insn.dsp_format.rd] = value;
947 				break;
948 			case lhx_op:
949 				if (!access_ok(addr, 2))
950 					goto sigbus;
951 				LoadHW(addr, value, res);
952 				if (res)
953 					goto fault;
954 				compute_return_epc(regs);
955 				regs->regs[insn.dsp_format.rd] = value;
956 				break;
957 			default:
958 				goto sigill;
959 			}
960 		}
961 #ifdef CONFIG_EVA
962 		else {
963 			/*
964 			 * We can land here only from the kernel accessing user
965 			 * memory, so we need to "switch" the address limit to
966 			 * user space so that the address check works properly.
967 			 */
968 			seg = get_fs();
969 			set_fs(USER_DS);
970 			switch (insn.spec3_format.func) {
971 			case lhe_op:
972 				if (!access_ok(addr, 2)) {
973 					set_fs(seg);
974 					goto sigbus;
975 				}
976 				LoadHWE(addr, value, res);
977 				if (res) {
978 					set_fs(seg);
979 					goto fault;
980 				}
981 				compute_return_epc(regs);
982 				regs->regs[insn.spec3_format.rt] = value;
983 				break;
984 			case lwe_op:
985 				if (!access_ok(addr, 4)) {
986 					set_fs(seg);
987 					goto sigbus;
988 				}
989 				LoadWE(addr, value, res);
990 				if (res) {
991 					set_fs(seg);
992 					goto fault;
993 				}
994 				compute_return_epc(regs);
995 				regs->regs[insn.spec3_format.rt] = value;
996 				break;
997 			case lhue_op:
998 				if (!access_ok(addr, 2)) {
999 					set_fs(seg);
1000 					goto sigbus;
1001 				}
1002 				LoadHWUE(addr, value, res);
1003 				if (res) {
1004 					set_fs(seg);
1005 					goto fault;
1006 				}
1007 				compute_return_epc(regs);
1008 				regs->regs[insn.spec3_format.rt] = value;
1009 				break;
1010 			case she_op:
1011 				if (!access_ok(addr, 2)) {
1012 					set_fs(seg);
1013 					goto sigbus;
1014 				}
1015 				compute_return_epc(regs);
1016 				value = regs->regs[insn.spec3_format.rt];
1017 				StoreHWE(addr, value, res);
1018 				if (res) {
1019 					set_fs(seg);
1020 					goto fault;
1021 				}
1022 				break;
1023 			case swe_op:
1024 				if (!access_ok(addr, 4)) {
1025 					set_fs(seg);
1026 					goto sigbus;
1027 				}
1028 				compute_return_epc(regs);
1029 				value = regs->regs[insn.spec3_format.rt];
1030 				StoreWE(addr, value, res);
1031 				if (res) {
1032 					set_fs(seg);
1033 					goto fault;
1034 				}
1035 				break;
1036 			default:
1037 				set_fs(seg);
1038 				goto sigill;
1039 			}
1040 			set_fs(seg);
1041 		}
1042 #endif
1043 		break;
1044 	case lh_op:
1045 		if (!access_ok(addr, 2))
1046 			goto sigbus;
1047 
1048 		if (IS_ENABLED(CONFIG_EVA)) {
1049 			if (uaccess_kernel())
1050 				LoadHW(addr, value, res);
1051 			else
1052 				LoadHWE(addr, value, res);
1053 		} else {
1054 			LoadHW(addr, value, res);
1055 		}
1056 
1057 		if (res)
1058 			goto fault;
1059 		compute_return_epc(regs);
1060 		regs->regs[insn.i_format.rt] = value;
1061 		break;
1062 
1063 	case lw_op:
1064 		if (!access_ok(addr, 4))
1065 			goto sigbus;
1066 
1067 		if (IS_ENABLED(CONFIG_EVA)) {
1068 			if (uaccess_kernel())
1069 				LoadW(addr, value, res);
1070 			else
1071 				LoadWE(addr, value, res);
1072 		} else {
1073 			LoadW(addr, value, res);
1074 		}
1075 
1076 		if (res)
1077 			goto fault;
1078 		compute_return_epc(regs);
1079 		regs->regs[insn.i_format.rt] = value;
1080 		break;
1081 
1082 	case lhu_op:
1083 		if (!access_ok(addr, 2))
1084 			goto sigbus;
1085 
1086 		if (IS_ENABLED(CONFIG_EVA)) {
1087 			if (uaccess_kernel())
1088 				LoadHWU(addr, value, res);
1089 			else
1090 				LoadHWUE(addr, value, res);
1091 		} else {
1092 			LoadHWU(addr, value, res);
1093 		}
1094 
1095 		if (res)
1096 			goto fault;
1097 		compute_return_epc(regs);
1098 		regs->regs[insn.i_format.rt] = value;
1099 		break;
1100 
1101 	case lwu_op:
1102 #ifdef CONFIG_64BIT
1103 		/*
1104 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1105 		 * if we're on a 32-bit processor and an i-cache incoherency
1106 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1107 		 * would blow up, so for now we don't handle unaligned 64-bit
1108 		 * instructions on 32-bit kernels.
1109 		 */
1110 		if (!access_ok(addr, 4))
1111 			goto sigbus;
1112 
1113 		LoadWU(addr, value, res);
1114 		if (res)
1115 			goto fault;
1116 		compute_return_epc(regs);
1117 		regs->regs[insn.i_format.rt] = value;
1118 		break;
1119 #endif /* CONFIG_64BIT */
1120 
1121 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1122 		goto sigill;
1123 
1124 	case ld_op:
1125 #ifdef CONFIG_64BIT
1126 		/*
1127 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1128 		 * if we're on a 32-bit processor and an i-cache incoherency
1129 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1130 		 * would blow up, so for now we don't handle unaligned 64-bit
1131 		 * instructions on 32-bit kernels.
1132 		 */
1133 		if (!access_ok(addr, 8))
1134 			goto sigbus;
1135 
1136 		LoadDW(addr, value, res);
1137 		if (res)
1138 			goto fault;
1139 		compute_return_epc(regs);
1140 		regs->regs[insn.i_format.rt] = value;
1141 		break;
1142 #endif /* CONFIG_64BIT */
1143 
1144 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1145 		goto sigill;
1146 
1147 	case sh_op:
1148 		if (!access_ok(addr, 2))
1149 			goto sigbus;
1150 
1151 		compute_return_epc(regs);
1152 		value = regs->regs[insn.i_format.rt];
1153 
1154 		if (IS_ENABLED(CONFIG_EVA)) {
1155 			if (uaccess_kernel())
1156 				StoreHW(addr, value, res);
1157 			else
1158 				StoreHWE(addr, value, res);
1159 		} else {
1160 			StoreHW(addr, value, res);
1161 		}
1162 
1163 		if (res)
1164 			goto fault;
1165 		break;
1166 
1167 	case sw_op:
1168 		if (!access_ok(addr, 4))
1169 			goto sigbus;
1170 
1171 		compute_return_epc(regs);
1172 		value = regs->regs[insn.i_format.rt];
1173 
1174 		if (IS_ENABLED(CONFIG_EVA)) {
1175 			if (uaccess_kernel())
1176 				StoreW(addr, value, res);
1177 			else
1178 				StoreWE(addr, value, res);
1179 		} else {
1180 			StoreW(addr, value, res);
1181 		}
1182 
1183 		if (res)
1184 			goto fault;
1185 		break;
1186 
1187 	case sd_op:
1188 #ifdef CONFIG_64BIT
1189 		/*
1190 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1191 		 * if we're on a 32-bit processor and an i-cache incoherency
1192 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1193 		 * would blow up, so for now we don't handle unaligned 64-bit
1194 		 * instructions on 32-bit kernels.
1195 		 */
1196 		if (!access_ok(addr, 8))
1197 			goto sigbus;
1198 
1199 		compute_return_epc(regs);
1200 		value = regs->regs[insn.i_format.rt];
1201 		StoreDW(addr, value, res);
1202 		if (res)
1203 			goto fault;
1204 		break;
1205 #endif /* CONFIG_64BIT */
1206 
1207 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1208 		goto sigill;
1209 
1210 #ifdef CONFIG_MIPS_FP_SUPPORT
1211 
1212 	case lwc1_op:
1213 	case ldc1_op:
1214 	case swc1_op:
1215 	case sdc1_op:
1216 	case cop1x_op: {
1217 		void __user *fault_addr = NULL;
1218 
1219 		die_if_kernel("Unaligned FP access in kernel code", regs);
1220 		BUG_ON(!used_math());
1221 
1222 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1223 					       &fault_addr);
1224 		own_fpu(1);	/* Restore FPU state. */
1225 
1226 		/* Signal if something went wrong. */
1227 		process_fpemu_return(res, fault_addr, 0);
1228 
1229 		if (res == 0)
1230 			break;
1231 		return;
1232 	}
1233 #endif /* CONFIG_MIPS_FP_SUPPORT */
1234 
1235 #ifdef CONFIG_CPU_HAS_MSA
1236 
1237 	case msa_op: {
1238 		unsigned int wd, preempted;
1239 		enum msa_2b_fmt df;
1240 		union fpureg *fpr;
1241 
1242 		if (!cpu_has_msa)
1243 			goto sigill;
1244 
1245 		/*
1246 		 * If we've reached this point then userland should have taken
1247 		 * the MSA disabled exception & initialised vector context at
1248 		 * some point in the past.
1249 		 */
1250 		BUG_ON(!thread_msa_context_live());
1251 
1252 		df = insn.msa_mi10_format.df;
1253 		wd = insn.msa_mi10_format.wd;
1254 		fpr = &current->thread.fpu.fpr[wd];
1255 
1256 		switch (insn.msa_mi10_format.func) {
1257 		case msa_ld_op:
1258 			if (!access_ok(addr, sizeof(*fpr)))
1259 				goto sigbus;
1260 
1261 			do {
1262 				/*
1263 				 * If we have live MSA context keep track of
1264 				 * whether we get preempted in order to avoid
1265 				 * the register context we load being clobbered
1266 				 * by the live context as it's saved during
1267 				 * preemption. If we don't have live context
1268 				 * then it can't be saved to clobber the value
1269 				 * we load.
1270 				 */
1271 				preempted = test_thread_flag(TIF_USEDMSA);
1272 
1273 				res = __copy_from_user_inatomic(fpr, addr,
1274 								sizeof(*fpr));
1275 				if (res)
1276 					goto fault;
1277 
1278 				/*
1279 				 * Update the hardware register if it is in use
1280 				 * by the task in this quantum, in order to
1281 				 * avoid having to save & restore the whole
1282 				 * vector context.
1283 				 */
1284 				preempt_disable();
1285 				if (test_thread_flag(TIF_USEDMSA)) {
1286 					write_msa_wr(wd, fpr, df);
1287 					preempted = 0;
1288 				}
1289 				preempt_enable();
1290 			} while (preempted);
1291 			break;
1292 
1293 		case msa_st_op:
1294 			if (!access_ok(addr, sizeof(*fpr)))
1295 				goto sigbus;
1296 
1297 			/*
1298 			 * Update from the hardware register if it is in use by
1299 			 * the task in this quantum, in order to avoid having to
1300 			 * save & restore the whole vector context.
1301 			 */
1302 			preempt_disable();
1303 			if (test_thread_flag(TIF_USEDMSA))
1304 				read_msa_wr(wd, fpr, df);
1305 			preempt_enable();
1306 
1307 			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
1308 			if (res)
1309 				goto fault;
1310 			break;
1311 
1312 		default:
1313 			goto sigbus;
1314 		}
1315 
1316 		compute_return_epc(regs);
1317 		break;
1318 	}
1319 #endif /* CONFIG_CPU_HAS_MSA */
1320 
1321 #ifndef CONFIG_CPU_MIPSR6
1322 	/*
1323 	 * COP2 is available to the implementor for application-specific use.
1324 	 * It's up to applications to register a notifier chain and do
1325 	 * whatever they have to do, including possibly sending signals.
1326 	 *
1327 	 * This instruction has been reallocated in Release 6.
1328 	 */
1329 	case lwc2_op:
1330 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
1331 		break;
1332 
1333 	case ldc2_op:
1334 		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
1335 		break;
1336 
1337 	case swc2_op:
1338 		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
1339 		break;
1340 
1341 	case sdc2_op:
1342 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
1343 		break;
1344 #endif
1345 	default:
1346 		/*
1347 		 * Pheeee...  We encountered a yet-unknown instruction or
1348 		 * cache coherence problem.  Die sucker, die ...
1349 		 */
1350 		goto sigill;
1351 	}
1352 
1353 #ifdef CONFIG_DEBUG_FS
1354 	unaligned_instructions++;
1355 #endif
1356 
1357 	return;
1358 
1359 fault:
1360 	/* roll back jump/branch */
1361 	regs->cp0_epc = origpc;
1362 	regs->regs[31] = orig31;
1363 	/* Did we have an exception handler installed? */
1364 	if (fixup_exception(regs))
1365 		return;
1366 
1367 	die_if_kernel("Unhandled kernel unaligned access", regs);
1368 	force_sig(SIGSEGV);
1369 
1370 	return;
1371 
1372 sigbus:
1373 	die_if_kernel("Unhandled kernel unaligned access", regs);
1374 	force_sig(SIGBUS);
1375 
1376 	return;
1377 
1378 sigill:
1379 	die_if_kernel
1380 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1381 	force_sig(SIGILL);
1382 }
1383 
1384 /* Recode table from 16-bit register notation to 32-bit GPR. */
1385 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
1386 
1387 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
1388 static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
1389 
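/*
 * Same as above, but for microMIPS.  Instructions are 16 or 32 bits wide, so
 * the decoder reads up to two halfwords for the current instruction and also
 * peeks at the next one: if the current instruction turns out to be a branch,
 * the access to emulate is the one sitting in its delay slot and the
 * continuation PC is computed by mm_isBranchInstr().
 */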
1390 static void emulate_load_store_microMIPS(struct pt_regs *regs,
1391 					 void __user *addr)
1392 {
1393 	unsigned long value;
1394 	unsigned int res;
1395 	int i;
1396 	unsigned int reg = 0, rvar;
1397 	unsigned long orig31;
1398 	u16 __user *pc16;
1399 	u16 halfword;
1400 	unsigned int word;
1401 	unsigned long origpc, contpc;
1402 	union mips_instruction insn;
1403 	struct mm_decoded_insn mminsn;
1404 
1405 	origpc = regs->cp0_epc;
1406 	orig31 = regs->regs[31];
1407 
1408 	mminsn.micro_mips_mode = 1;
1409 
1410 	/*
1411 	 * This load never faults.
1412 	 */
1413 	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1414 	__get_user(halfword, pc16);
1415 	pc16++;
1416 	contpc = regs->cp0_epc + 2;
1417 	word = ((unsigned int)halfword << 16);
1418 	mminsn.pc_inc = 2;
1419 
1420 	if (!mm_insn_16bit(halfword)) {
1421 		__get_user(halfword, pc16);
1422 		pc16++;
1423 		contpc = regs->cp0_epc + 4;
1424 		mminsn.pc_inc = 4;
1425 		word |= halfword;
1426 	}
1427 	mminsn.insn = word;
1428 
1429 	if (get_user(halfword, pc16))
1430 		goto fault;
1431 	mminsn.next_pc_inc = 2;
1432 	word = ((unsigned int)halfword << 16);
1433 
1434 	if (!mm_insn_16bit(halfword)) {
1435 		pc16++;
1436 		if (get_user(halfword, pc16))
1437 			goto fault;
1438 		mminsn.next_pc_inc = 4;
1439 		word |= halfword;
1440 	}
1441 	mminsn.next_insn = word;
1442 
1443 	insn = (union mips_instruction)(mminsn.insn);
1444 	if (mm_isBranchInstr(regs, mminsn, &contpc))
1445 		insn = (union mips_instruction)(mminsn.next_insn);
1446 
1447 	/*  Parse instruction to find what to do */
1448 
1449 	switch (insn.mm_i_format.opcode) {
1450 
1451 	case mm_pool32a_op:
1452 		switch (insn.mm_x_format.func) {
1453 		case mm_lwxs_op:
1454 			reg = insn.mm_x_format.rd;
1455 			goto loadW;
1456 		}
1457 
1458 		goto sigbus;
1459 
1460 	case mm_pool32b_op:
1461 		switch (insn.mm_m_format.func) {
1462 		case mm_lwp_func:
1463 			reg = insn.mm_m_format.rd;
1464 			if (reg == 31)
1465 				goto sigbus;
1466 
1467 			if (!access_ok(addr, 8))
1468 				goto sigbus;
1469 
1470 			LoadW(addr, value, res);
1471 			if (res)
1472 				goto fault;
1473 			regs->regs[reg] = value;
1474 			addr += 4;
1475 			LoadW(addr, value, res);
1476 			if (res)
1477 				goto fault;
1478 			regs->regs[reg + 1] = value;
1479 			goto success;
1480 
1481 		case mm_swp_func:
1482 			reg = insn.mm_m_format.rd;
1483 			if (reg == 31)
1484 				goto sigbus;
1485 
1486 			if (!access_ok(addr, 8))
1487 				goto sigbus;
1488 
1489 			value = regs->regs[reg];
1490 			StoreW(addr, value, res);
1491 			if (res)
1492 				goto fault;
1493 			addr += 4;
1494 			value = regs->regs[reg + 1];
1495 			StoreW(addr, value, res);
1496 			if (res)
1497 				goto fault;
1498 			goto success;
1499 
1500 		case mm_ldp_func:
1501 #ifdef CONFIG_64BIT
1502 			reg = insn.mm_m_format.rd;
1503 			if (reg == 31)
1504 				goto sigbus;
1505 
1506 			if (!access_ok(addr, 16))
1507 				goto sigbus;
1508 
1509 			LoadDW(addr, value, res);
1510 			if (res)
1511 				goto fault;
1512 			regs->regs[reg] = value;
1513 			addr += 8;
1514 			LoadDW(addr, value, res);
1515 			if (res)
1516 				goto fault;
1517 			regs->regs[reg + 1] = value;
1518 			goto success;
1519 #endif /* CONFIG_64BIT */
1520 
1521 			goto sigill;
1522 
1523 		case mm_sdp_func:
1524 #ifdef CONFIG_64BIT
1525 			reg = insn.mm_m_format.rd;
1526 			if (reg == 31)
1527 				goto sigbus;
1528 
1529 			if (!access_ok(addr, 16))
1530 				goto sigbus;
1531 
1532 			value = regs->regs[reg];
1533 			StoreDW(addr, value, res);
1534 			if (res)
1535 				goto fault;
1536 			addr += 8;
1537 			value = regs->regs[reg + 1];
1538 			StoreDW(addr, value, res);
1539 			if (res)
1540 				goto fault;
1541 			goto success;
1542 #endif /* CONFIG_64BIT */
1543 
1544 			goto sigill;
1545 
1546 		case mm_lwm32_func:
1547 			reg = insn.mm_m_format.rd;
1548 			rvar = reg & 0xf;
1549 			if ((rvar > 9) || !reg)
1550 				goto sigill;
1551 			if (reg & 0x10) {
1552 				if (!access_ok(addr, 4 * (rvar + 1)))
1553 					goto sigbus;
1554 			} else {
1555 				if (!access_ok(addr, 4 * rvar))
1556 					goto sigbus;
1557 			}
1558 			if (rvar == 9)
1559 				rvar = 8;
1560 			for (i = 16; rvar; rvar--, i++) {
1561 				LoadW(addr, value, res);
1562 				if (res)
1563 					goto fault;
1564 				addr += 4;
1565 				regs->regs[i] = value;
1566 			}
1567 			if ((reg & 0xf) == 9) {
1568 				LoadW(addr, value, res);
1569 				if (res)
1570 					goto fault;
1571 				addr += 4;
1572 				regs->regs[30] = value;
1573 			}
1574 			if (reg & 0x10) {
1575 				LoadW(addr, value, res);
1576 				if (res)
1577 					goto fault;
1578 				regs->regs[31] = value;
1579 			}
1580 			goto success;
1581 
1582 		case mm_swm32_func:
1583 			reg = insn.mm_m_format.rd;
1584 			rvar = reg & 0xf;
1585 			if ((rvar > 9) || !reg)
1586 				goto sigill;
1587 			if (reg & 0x10) {
1588 				if (!access_ok(addr, 4 * (rvar + 1)))
1589 					goto sigbus;
1590 			} else {
1591 				if (!access_ok(addr, 4 * rvar))
1592 					goto sigbus;
1593 			}
1594 			if (rvar == 9)
1595 				rvar = 8;
1596 			for (i = 16; rvar; rvar--, i++) {
1597 				value = regs->regs[i];
1598 				StoreW(addr, value, res);
1599 				if (res)
1600 					goto fault;
1601 				addr += 4;
1602 			}
1603 			if ((reg & 0xf) == 9) {
1604 				value = regs->regs[30];
1605 				StoreW(addr, value, res);
1606 				if (res)
1607 					goto fault;
1608 				addr += 4;
1609 			}
1610 			if (reg & 0x10) {
1611 				value = regs->regs[31];
1612 				StoreW(addr, value, res);
1613 				if (res)
1614 					goto fault;
1615 			}
1616 			goto success;
1617 
1618 		case mm_ldm_func:
1619 #ifdef CONFIG_64BIT
1620 			reg = insn.mm_m_format.rd;
1621 			rvar = reg & 0xf;
1622 			if ((rvar > 9) || !reg)
1623 				goto sigill;
1624 			if (reg & 0x10) {
1625 				if (!access_ok(addr, 8 * (rvar + 1)))
1626 					goto sigbus;
1627 			} else {
1628 				if (!access_ok(addr, 8 * rvar))
1629 					goto sigbus;
1630 			}
1631 			if (rvar == 9)
1632 				rvar = 8;
1633 
1634 			for (i = 16; rvar; rvar--, i++) {
1635 				LoadDW(addr, value, res);
1636 				if (res)
1637 					goto fault;
1638 				addr += 8;
1639 				regs->regs[i] = value;
1640 			}
1641 			if ((reg & 0xf) == 9) {
1642 				LoadDW(addr, value, res);
1643 				if (res)
1644 					goto fault;
1645 				addr += 8;
1646 				regs->regs[30] = value;
1647 			}
1648 			if (reg & 0x10) {
1649 				LoadDW(addr, value, res);
1650 				if (res)
1651 					goto fault;
1652 				regs->regs[31] = value;
1653 			}
1654 			goto success;
1655 #endif /* CONFIG_64BIT */
1656 
1657 			goto sigill;
1658 
1659 		case mm_sdm_func:
1660 #ifdef CONFIG_64BIT
1661 			reg = insn.mm_m_format.rd;
1662 			rvar = reg & 0xf;
1663 			if ((rvar > 9) || !reg)
1664 				goto sigill;
1665 			if (reg & 0x10) {
1666 				if (!access_ok(addr, 8 * (rvar + 1)))
1667 					goto sigbus;
1668 			} else {
1669 				if (!access_ok(addr, 8 * rvar))
1670 					goto sigbus;
1671 			}
1672 			if (rvar == 9)
1673 				rvar = 8;
1674 
1675 			for (i = 16; rvar; rvar--, i++) {
1676 				value = regs->regs[i];
1677 				StoreDW(addr, value, res);
1678 				if (res)
1679 					goto fault;
1680 				addr += 8;
1681 			}
1682 			if ((reg & 0xf) == 9) {
1683 				value = regs->regs[30];
1684 				StoreDW(addr, value, res);
1685 				if (res)
1686 					goto fault;
1687 				addr += 8;
1688 			}
1689 			if (reg & 0x10) {
1690 				value = regs->regs[31];
1691 				StoreDW(addr, value, res);
1692 				if (res)
1693 					goto fault;
1694 			}
1695 			goto success;
1696 #endif /* CONFIG_64BIT */
1697 
1698 			goto sigill;
1699 
1700 			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
1701 		}
1702 
1703 		goto sigbus;
1704 
1705 	case mm_pool32c_op:
1706 		switch (insn.mm_m_format.func) {
1707 		case mm_lwu_func:
1708 			reg = insn.mm_m_format.rd;
1709 			goto loadWU;
1710 		}
1711 
1712 		/*  LL,SC,LLD,SCD are not serviced */
1713 		goto sigbus;
1714 
1715 #ifdef CONFIG_MIPS_FP_SUPPORT
1716 	case mm_pool32f_op:
1717 		switch (insn.mm_x_format.func) {
1718 		case mm_lwxc1_func:
1719 		case mm_swxc1_func:
1720 		case mm_ldxc1_func:
1721 		case mm_sdxc1_func:
1722 			goto fpu_emul;
1723 		}
1724 
1725 		goto sigbus;
1726 
1727 	case mm_ldc132_op:
1728 	case mm_sdc132_op:
1729 	case mm_lwc132_op:
1730 	case mm_swc132_op: {
1731 		void __user *fault_addr = NULL;
1732 
1733 fpu_emul:
1734 		/* roll back jump/branch */
1735 		regs->cp0_epc = origpc;
1736 		regs->regs[31] = orig31;
1737 
1738 		die_if_kernel("Unaligned FP access in kernel code", regs);
1739 		BUG_ON(!used_math());
1740 		BUG_ON(!is_fpu_owner());
1741 
1742 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1743 					       &fault_addr);
1744 		own_fpu(1);	/* restore FPU state */
1745 
1746 		/* If something went wrong, signal */
1747 		process_fpemu_return(res, fault_addr, 0);
1748 
1749 		if (res == 0)
1750 			goto success;
1751 		return;
1752 	}
1753 #endif /* CONFIG_MIPS_FP_SUPPORT */
1754 
1755 	case mm_lh32_op:
1756 		reg = insn.mm_i_format.rt;
1757 		goto loadHW;
1758 
1759 	case mm_lhu32_op:
1760 		reg = insn.mm_i_format.rt;
1761 		goto loadHWU;
1762 
1763 	case mm_lw32_op:
1764 		reg = insn.mm_i_format.rt;
1765 		goto loadW;
1766 
1767 	case mm_sh32_op:
1768 		reg = insn.mm_i_format.rt;
1769 		goto storeHW;
1770 
1771 	case mm_sw32_op:
1772 		reg = insn.mm_i_format.rt;
1773 		goto storeW;
1774 
1775 	case mm_ld32_op:
1776 		reg = insn.mm_i_format.rt;
1777 		goto loadDW;
1778 
1779 	case mm_sd32_op:
1780 		reg = insn.mm_i_format.rt;
1781 		goto storeDW;
1782 
1783 	case mm_pool16c_op:
1784 		switch (insn.mm16_m_format.func) {
1785 		case mm_lwm16_op:
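			/*
			 * 16-bit LWM: reload rlist + 1 s-registers starting
			 * at s0 ($16), then ra ($31) from the following word.
			 * mm_swm16_op below is the store counterpart.
			 */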
1786 			reg = insn.mm16_m_format.rlist;
1787 			rvar = reg + 1;
1788 			if (!access_ok(addr, 4 * rvar))
1789 				goto sigbus;
1790 
1791 			for (i = 16; rvar; rvar--, i++) {
1792 				LoadW(addr, value, res);
1793 				if (res)
1794 					goto fault;
1795 				addr += 4;
1796 				regs->regs[i] = value;
1797 			}
1798 			LoadW(addr, value, res);
1799 			if (res)
1800 				goto fault;
1801 			regs->regs[31] = value;
1802 
1803 			goto success;
1804 
1805 		case mm_swm16_op:
1806 			reg = insn.mm16_m_format.rlist;
1807 			rvar = reg + 1;
1808 			if (!access_ok(addr, 4 * rvar))
1809 				goto sigbus;
1810 
1811 			for (i = 16; rvar; rvar--, i++) {
1812 				value = regs->regs[i];
1813 				StoreW(addr, value, res);
1814 				if (res)
1815 					goto fault;
1816 				addr += 4;
1817 			}
1818 			value = regs->regs[31];
1819 			StoreW(addr, value, res);
1820 			if (res)
1821 				goto fault;
1822 
1823 			goto success;
1824 
1825 		}
1826 
1827 		goto sigbus;
1828 
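	/*
	 * The 16-bit encodings carry 3-bit register fields; the reg16to32[]
	 * and reg16to32st[] tables defined earlier in this file translate
	 * them to full GPR numbers.
	 */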
1829 	case mm_lhu16_op:
1830 		reg = reg16to32[insn.mm16_rb_format.rt];
1831 		goto loadHWU;
1832 
1833 	case mm_lw16_op:
1834 		reg = reg16to32[insn.mm16_rb_format.rt];
1835 		goto loadW;
1836 
1837 	case mm_sh16_op:
1838 		reg = reg16to32st[insn.mm16_rb_format.rt];
1839 		goto storeHW;
1840 
1841 	case mm_sw16_op:
1842 		reg = reg16to32st[insn.mm16_rb_format.rt];
1843 		goto storeW;
1844 
1845 	case mm_lwsp16_op:
1846 		reg = insn.mm16_r5_format.rt;
1847 		goto loadW;
1848 
1849 	case mm_swsp16_op:
1850 		reg = insn.mm16_r5_format.rt;
1851 		goto storeW;
1852 
1853 	case mm_lwgp16_op:
1854 		reg = reg16to32[insn.mm16_r3_format.rt];
1855 		goto loadW;
1856 
1857 	default:
1858 		goto sigill;
1859 	}
1860 
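/*
 * Shared emulation tails.  Each label validates the user address for the
 * access size, performs the transfer with the LoadHW()/LoadW()/LoadDW() and
 * StoreHW()/StoreW()/StoreDW() helpers, which tolerate the misalignment, and
 * then branches to success or to the fault/signal paths below; 'res' is
 * non-zero when one of the underlying accesses faulted.
 */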
1861 loadHW:
1862 	if (!access_ok(addr, 2))
1863 		goto sigbus;
1864 
1865 	LoadHW(addr, value, res);
1866 	if (res)
1867 		goto fault;
1868 	regs->regs[reg] = value;
1869 	goto success;
1870 
1871 loadHWU:
1872 	if (!access_ok(addr, 2))
1873 		goto sigbus;
1874 
1875 	LoadHWU(addr, value, res);
1876 	if (res)
1877 		goto fault;
1878 	regs->regs[reg] = value;
1879 	goto success;
1880 
1881 loadW:
1882 	if (!access_ok(addr, 4))
1883 		goto sigbus;
1884 
1885 	LoadW(addr, value, res);
1886 	if (res)
1887 		goto fault;
1888 	regs->regs[reg] = value;
1889 	goto success;
1890 
1891 loadWU:
1892 #ifdef CONFIG_64BIT
1893 	/*
1894 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1895 	 * if we're on a 32-bit processor and an i-cache incoherency
1896 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1897 	 * would blow up, so for now we don't handle unaligned 64-bit
1898 	 * instructions on 32-bit kernels.
1899 	 */
1900 	if (!access_ok(addr, 4))
1901 		goto sigbus;
1902 
1903 	LoadWU(addr, value, res);
1904 	if (res)
1905 		goto fault;
1906 	regs->regs[reg] = value;
1907 	goto success;
1908 #endif /* CONFIG_64BIT */
1909 
1910 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1911 	goto sigill;
1912 
1913 loadDW:
1914 #ifdef CONFIG_64BIT
1915 	/*
1916 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1917 	 * if we're on a 32-bit processor and an i-cache incoherency
1918 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1919 	 * would blow up, so for now we don't handle unaligned 64-bit
1920 	 * instructions on 32-bit kernels.
1921 	 */
1922 	if (!access_ok(addr, 8))
1923 		goto sigbus;
1924 
1925 	LoadDW(addr, value, res);
1926 	if (res)
1927 		goto fault;
1928 	regs->regs[reg] = value;
1929 	goto success;
1930 #endif /* CONFIG_64BIT */
1931 
1932 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1933 	goto sigill;
1934 
1935 storeHW:
1936 	if (!access_ok(addr, 2))
1937 		goto sigbus;
1938 
1939 	value = regs->regs[reg];
1940 	StoreHW(addr, value, res);
1941 	if (res)
1942 		goto fault;
1943 	goto success;
1944 
1945 storeW:
1946 	if (!access_ok(addr, 4))
1947 		goto sigbus;
1948 
1949 	value = regs->regs[reg];
1950 	StoreW(addr, value, res);
1951 	if (res)
1952 		goto fault;
1953 	goto success;
1954 
1955 storeDW:
1956 #ifdef CONFIG_64BIT
1957 	/*
1958 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1959 	 * if we're on a 32-bit processor and an i-cache incoherency
1960 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1961 	 * would blow up, so for now we don't handle unaligned 64-bit
1962 	 * instructions on 32-bit kernels.
1963 	 */
1964 	if (!access_ok(addr, 8))
1965 		goto sigbus;
1966 
1967 	value = regs->regs[reg];
1968 	StoreDW(addr, value, res);
1969 	if (res)
1970 		goto fault;
1971 	goto success;
1972 #endif /* CONFIG_64BIT */
1973 
1974 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1975 	goto sigill;
1976 
1977 success:
1978 	regs->cp0_epc = contpc;	/* advance or branch */
1979 
1980 #ifdef CONFIG_DEBUG_FS
1981 	unaligned_instructions++;
1982 #endif
1983 	return;
1984 
1985 fault:
1986 	/* roll back jump/branch */
1987 	regs->cp0_epc = origpc;
1988 	regs->regs[31] = orig31;
1989 	/* Did we have an exception handler installed? */
1990 	if (fixup_exception(regs))
1991 		return;
1992 
1993 	die_if_kernel("Unhandled kernel unaligned access", regs);
1994 	force_sig(SIGSEGV);
1995 
1996 	return;
1997 
1998 sigbus:
1999 	die_if_kernel("Unhandled kernel unaligned access", regs);
2000 	force_sig(SIGBUS);
2001 
2002 	return;
2003 
2004 sigill:
2005 	die_if_kernel
2006 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
2007 	force_sig(SIGILL);
2008 }
2009 
2010 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
2011 {
2012 	unsigned long value;
2013 	unsigned int res;
2014 	int reg;
2015 	unsigned long orig31;
2016 	u16 __user *pc16;
2017 	unsigned long origpc;
2018 	union mips16e_instruction mips16inst, oldinst;
2019 	unsigned int opcode;
2020 	int extended = 0;
2021 
2022 	origpc = regs->cp0_epc;
2023 	orig31 = regs->regs[31];
2024 	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
2025 	/*
2026 	 * This load never faults.
2027 	 */
2028 	__get_user(mips16inst.full, pc16);
2029 	oldinst = mips16inst;
2030 
2031 	/* skip EXTEND instruction */
2032 	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
2033 		extended = 1;
2034 		pc16++;
2035 		__get_user(mips16inst.full, pc16);
2036 	} else if (delay_slot(regs)) {
2037 		/*  skip jump instructions */
2038 		/*  JAL/JALX are 32-bit but carry the opcode in the first halfword */
2039 		if (mips16inst.ri.opcode == MIPS16e_jal_op)
2040 			pc16++;
2041 		pc16++;
2042 		if (get_user(mips16inst.full, pc16))
2043 			goto sigbus;
2044 	}
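	/*
	 * EXTEND is a 16-bit prefix that widens the immediate field of the
	 * instruction that follows; 'extended' is remembered because the
	 * MIPS16e2 re-encodings handled below are only recognised in their
	 * extended forms.
	 */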
2045 
2046 	opcode = mips16inst.ri.opcode;
2047 	switch (opcode) {
2048 	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
2049 		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
2050 		case MIPS16e_ldpc_func:
2051 		case MIPS16e_ldsp_func:
2052 			reg = reg16to32[mips16inst.ri64.ry];
2053 			goto loadDW;
2054 
2055 		case MIPS16e_sdsp_func:
2056 			reg = reg16to32[mips16inst.ri64.ry];
2057 			goto writeDW;
2058 
2059 		case MIPS16e_sdrasp_func:
2060 			reg = 29;	/* GPRSP */
2061 			goto writeDW;
2062 		}
2063 
2064 		goto sigbus;
2065 
2066 	case MIPS16e_swsp_op:
2067 		reg = reg16to32[mips16inst.ri.rx];
2068 		if (extended && cpu_has_mips16e2)
2069 			switch (mips16inst.ri.imm >> 5) {
2070 			case 0:		/* SWSP */
2071 			case 1:		/* SWGP */
2072 				break;
2073 			case 2:		/* SHGP */
2074 				opcode = MIPS16e_sh_op;
2075 				break;
2076 			default:
2077 				goto sigbus;
2078 			}
2079 		break;
2080 
2081 	case MIPS16e_lwpc_op:
2082 		reg = reg16to32[mips16inst.ri.rx];
2083 		break;
2084 
2085 	case MIPS16e_lwsp_op:
2086 		reg = reg16to32[mips16inst.ri.rx];
2087 		if (extended && cpu_has_mips16e2)
2088 			switch (mips16inst.ri.imm >> 5) {
2089 			case 0:		/* LWSP */
2090 			case 1:		/* LWGP */
2091 				break;
2092 			case 2:		/* LHGP */
2093 				opcode = MIPS16e_lh_op;
2094 				break;
2095 			case 4:		/* LHUGP */
2096 				opcode = MIPS16e_lhu_op;
2097 				break;
2098 			default:
2099 				goto sigbus;
2100 			}
2101 		break;
2102 
2103 	case MIPS16e_i8_op:
2104 		if (mips16inst.i8.func != MIPS16e_swrasp_func)
2105 			goto sigbus;
2106 		reg = 29;	/* GPRSP */
2107 		break;
2108 
2109 	default:
2110 		reg = reg16to32[mips16inst.rri.ry];
2111 		break;
2112 	}
2113 
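	/* Register fields are mapped and MIPS16e2 forms rewritten; now perform the access itself. */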
2114 	switch (opcode) {
2115 
2116 	case MIPS16e_lb_op:
2117 	case MIPS16e_lbu_op:
2118 	case MIPS16e_sb_op:
2119 		goto sigbus;
2120 
2121 	case MIPS16e_lh_op:
2122 		if (!access_ok(addr, 2))
2123 			goto sigbus;
2124 
2125 		LoadHW(addr, value, res);
2126 		if (res)
2127 			goto fault;
2128 		MIPS16e_compute_return_epc(regs, &oldinst);
2129 		regs->regs[reg] = value;
2130 		break;
2131 
2132 	case MIPS16e_lhu_op:
2133 		if (!access_ok(addr, 2))
2134 			goto sigbus;
2135 
2136 		LoadHWU(addr, value, res);
2137 		if (res)
2138 			goto fault;
2139 		MIPS16e_compute_return_epc(regs, &oldinst);
2140 		regs->regs[reg] = value;
2141 		break;
2142 
2143 	case MIPS16e_lw_op:
2144 	case MIPS16e_lwpc_op:
2145 	case MIPS16e_lwsp_op:
2146 		if (!access_ok(addr, 4))
2147 			goto sigbus;
2148 
2149 		LoadW(addr, value, res);
2150 		if (res)
2151 			goto fault;
2152 		MIPS16e_compute_return_epc(regs, &oldinst);
2153 		regs->regs[reg] = value;
2154 		break;
2155 
2156 	case MIPS16e_lwu_op:
2157 #ifdef CONFIG_64BIT
2158 		/*
2159 		 * A 32-bit kernel might be running on a 64-bit processor.  But
2160 		 * if we're on a 32-bit processor and an i-cache incoherency
2161 		 * or race makes us see a 64-bit instruction here the sdl/sdr
2162 		 * would blow up, so for now we don't handle unaligned 64-bit
2163 		 * instructions on 32-bit kernels.
2164 		 */
2165 		if (!access_ok(addr, 4))
2166 			goto sigbus;
2167 
2168 		LoadWU(addr, value, res);
2169 		if (res)
2170 			goto fault;
2171 		MIPS16e_compute_return_epc(regs, &oldinst);
2172 		regs->regs[reg] = value;
2173 		break;
2174 #endif /* CONFIG_64BIT */
2175 
2176 		/* Cannot handle 64-bit instructions in 32-bit kernel */
2177 		goto sigill;
2178 
2179 	case MIPS16e_ld_op:
2180 loadDW:
2181 #ifdef CONFIG_64BIT
2182 		/*
2183 		 * A 32-bit kernel might be running on a 64-bit processor.  But
2184 		 * if we're on a 32-bit processor and an i-cache incoherency
2185 		 * or race makes us see a 64-bit instruction here the sdl/sdr
2186 		 * would blow up, so for now we don't handle unaligned 64-bit
2187 		 * instructions on 32-bit kernels.
2188 		 */
2189 		if (!access_ok(addr, 8))
2190 			goto sigbus;
2191 
2192 		LoadDW(addr, value, res);
2193 		if (res)
2194 			goto fault;
2195 		MIPS16e_compute_return_epc(regs, &oldinst);
2196 		regs->regs[reg] = value;
2197 		break;
2198 #endif /* CONFIG_64BIT */
2199 
2200 		/* Cannot handle 64-bit instructions in 32-bit kernel */
2201 		goto sigill;
2202 
2203 	case MIPS16e_sh_op:
2204 		if (!access_ok(addr, 2))
2205 			goto sigbus;
2206 
2207 		MIPS16e_compute_return_epc(regs, &oldinst);
2208 		value = regs->regs[reg];
2209 		StoreHW(addr, value, res);
2210 		if (res)
2211 			goto fault;
2212 		break;
2213 
2214 	case MIPS16e_sw_op:
2215 	case MIPS16e_swsp_op:
2216 	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
2217 		if (!access_ok(addr, 4))
2218 			goto sigbus;
2219 
2220 		MIPS16e_compute_return_epc(regs, &oldinst);
2221 		value = regs->regs[reg];
2222 		StoreW(addr, value, res);
2223 		if (res)
2224 			goto fault;
2225 		break;
2226 
2227 	case MIPS16e_sd_op:
2228 writeDW:
2229 #ifdef CONFIG_64BIT
2230 		/*
2231 		 * A 32-bit kernel might be running on a 64-bit processor.  But
2232 		 * if we're on a 32-bit processor and an i-cache incoherency
2233 		 * or race makes us see a 64-bit instruction here the sdl/sdr
2234 		 * would blow up, so for now we don't handle unaligned 64-bit
2235 		 * instructions on 32-bit kernels.
2236 		 */
2237 		if (!access_ok(addr, 8))
2238 			goto sigbus;
2239 
2240 		MIPS16e_compute_return_epc(regs, &oldinst);
2241 		value = regs->regs[reg];
2242 		StoreDW(addr, value, res);
2243 		if (res)
2244 			goto fault;
2245 		break;
2246 #endif /* CONFIG_64BIT */
2247 
2248 		/* Cannot handle 64-bit instructions in 32-bit kernel */
2249 		goto sigill;
2250 
2251 	default:
2252 		/*
2253 		 * Pheeee...  We encountered an yet unknown instruction or
2254 	 * Pheeee...  We encountered an as-yet unknown instruction or
2255 		 */
2256 		goto sigill;
2257 	}
2258 
2259 #ifdef CONFIG_DEBUG_FS
2260 	unaligned_instructions++;
2261 #endif
2262 
2263 	return;
2264 
2265 fault:
2266 	/* roll back jump/branch */
2267 	regs->cp0_epc = origpc;
2268 	regs->regs[31] = orig31;
2269 	/* Did we have an exception handler installed? */
2270 	if (fixup_exception(regs))
2271 		return;
2272 
2273 	die_if_kernel("Unhandled kernel unaligned access", regs);
2274 	force_sig(SIGSEGV);
2275 
2276 	return;
2277 
2278 sigbus:
2279 	die_if_kernel("Unhandled kernel unaligned access", regs);
2280 	force_sig(SIGBUS);
2281 
2282 	return;
2283 
2284 sigill:
2285 	die_if_kernel
2286 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
2287 	force_sig(SIGILL);
2288 }
2289 
2290 asmlinkage void do_ade(struct pt_regs *regs)
2291 {
2292 	enum ctx_state prev_state;
2293 	unsigned int __user *pc;
2294 	mm_segment_t seg;
2295 
2296 	prev_state = exception_enter();
2297 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
2298 			1, regs, regs->cp0_badvaddr);
2299 	/*
2300 	 * Did we catch a fault trying to load an instruction?
2301 	 */
2302 	if (regs->cp0_badvaddr == regs->cp0_epc)
2303 		goto sigbus;
2304 
2305 	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
2306 		goto sigbus;
2307 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2308 		goto sigbus;
2309 
2310 	/*
2311 	 * Do branch emulation only if we didn't forward the exception.
2312 	 * This is all so but ugly ...
2313 	 * This is all rather ugly ...
2314 
2315 	/*
2316 	 * Are we running in microMIPS mode?
2317 	 */
2318 	if (get_isa16_mode(regs->cp0_epc)) {
2319 		/*
2320 		 * Did we catch a fault trying to load an instruction in
2321 		 * 16-bit mode?
2322 		 */
2323 		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2324 			goto sigbus;
2325 		if (unaligned_action == UNALIGNED_ACTION_SHOW)
2326 			show_registers(regs);
2327 
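		/*
		 * The faulting access may be in kernel code reached through
		 * an exception-table fixup, so when we did not come from
		 * user mode run the emulator with KERNEL_DS, letting its
		 * user-access helpers reach kernel addresses as well.
		 */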
2328 		if (cpu_has_mmips) {
2329 			seg = get_fs();
2330 			if (!user_mode(regs))
2331 				set_fs(KERNEL_DS);
2332 			emulate_load_store_microMIPS(regs,
2333 				(void __user *)regs->cp0_badvaddr);
2334 			set_fs(seg);
2335 
2336 			return;
2337 		}
2338 
2339 		if (cpu_has_mips16) {
2340 			seg = get_fs();
2341 			if (!user_mode(regs))
2342 				set_fs(KERNEL_DS);
2343 			emulate_load_store_MIPS16e(regs,
2344 				(void __user *)regs->cp0_badvaddr);
2345 			set_fs(seg);
2346 
2347 			return;
2348 		}
2349 
2350 		goto sigbus;
2351 	}
2352 
2353 	if (unaligned_action == UNALIGNED_ACTION_SHOW)
2354 		show_registers(regs);
2355 	pc = (unsigned int __user *)exception_epc(regs);
2356 
2357 	seg = get_fs();
2358 	if (!user_mode(regs))
2359 		set_fs(KERNEL_DS);
2360 	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
2361 	set_fs(seg);
2362 
2363 	return;
2364 
2365 sigbus:
2366 	die_if_kernel("Kernel unaligned instruction access", regs);
2367 	force_sig(SIGBUS);
2368 
2369 	/*
2370 	 * XXX On return from the signal handler we should advance the epc
2371 	 */
2372 	exception_exit(prev_state);
2373 }
2374 
2375 #ifdef CONFIG_DEBUG_FS
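/*
 * Usage sketch (assuming debugfs is mounted in the usual place and
 * mips_debugfs_dir is the standard "mips" directory):
 *
 *	cat /sys/kernel/debug/mips/unaligned_instructions
 *	echo 1 > /sys/kernel/debug/mips/unaligned_action	(force SIGBUS)
 *	echo 2 > /sys/kernel/debug/mips/unaligned_action	(show registers, then emulate)
 *
 * The numeric values follow the UNALIGNED_ACTION_* enum used by do_ade()
 * above.
 */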
2376 static int __init debugfs_unaligned(void)
2377 {
2378 	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
2379 			   &unaligned_instructions);
2380 	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2381 			   mips_debugfs_dir, &unaligned_action);
2382 	return 0;
2383 }
2384 arch_initcall(debugfs_unaligned);
2385 #endif
2386