xref: /openbmc/linux/arch/mips/kernel/unaligned.c (revision e23feb16)
1 /*
2  * Handle unaligned accesses by emulation.
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  *
11  * This file contains the exception handler for address error exceptions,
12  * with the special capability to execute faulting instructions in software.
13  * The handler does not try to handle the case when the program counter
14  * points to an address not aligned to a word boundary.
15  *
16  * Putting data at unaligned addresses is bad practice even on Intel, where
17  * only performance suffers.  Much worse, such code is not portable.  Because
18  * several programs die on MIPS due to alignment problems, I decided to
19  * implement this handler anyway, though I originally didn't intend to do
20  * this at all for user code.
21  *
22  * For now I enable fixing of address errors by default to make life easier.
23  * However, I intend to disable this at some point in the future, once the
24  * alignment problems in user programs have been fixed.  For programmers this
25  * is the right way to go.
26  *
27  * Fixing address errors is a per-process option.  The option is inherited
28  * across fork(2) and execve(2) calls.  If you really want to use this
29  * option in your user programs (I strongly discourage relying on the
30  * software emulation), use the following code in your userland stuff:
31  *
32  * #include <sys/sysmips.h>
33  *
34  * ...
35  * sysmips(MIPS_FIXADE, x);
36  * ...
37  *
38  * An x of 0 disables the software emulation; any other value enables it.
39  *
40  * Below is a little program to play around with this feature.
41  *
42  * #include <stdio.h>
43  * #include <sys/sysmips.h>
44  *
45  * struct foo {
46  *	   unsigned char bar[8];
47  * };
48  *
49  * int main(int argc, char *argv[])
50  * {
51  *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
52  *	   unsigned int *p = (unsigned int *) (x.bar + 3);
53  *	   int i;
54  *
55  *	   if (argc > 1)
56  *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
57  *
58  *	   printf("*p = %08x\n", *p);
59  *
60  *	   *p = 0xdeadface;
61  *
62  *	   for (i = 0; i <= 7; i++)
63  *		   printf("%02x ", x.bar[i]);
64  *	   printf("\n");
65  * }
66  *
67  * Coprocessor loads are not supported; I think this case is unimportant
68  * in practice.
69  *
70  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
71  *	 exception for the R6000.
72  *	 A store crossing a page boundary might be executed only partially.
73  *	 Undo the partial store in this case.
74  */
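/*
 * Rough control flow, as implemented below: an address error exception
 * (unaligned load or store) enters at do_ade(), which checks TIF_FIXADE
 * and the debugfs "unaligned_action" setting and then, depending on the
 * ISA mode of the faulting PC, dispatches to emulate_load_store_insn()
 * (classic MIPS), emulate_load_store_microMIPS() or
 * emulate_load_store_MIPS16e().  Each emulator either fixes up the access
 * and advances the EPC, or rolls back the branch state and raises
 * SIGBUS/SIGSEGV/SIGILL.
 */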
75 #include <linux/context_tracking.h>
76 #include <linux/mm.h>
77 #include <linux/signal.h>
78 #include <linux/smp.h>
79 #include <linux/sched.h>
80 #include <linux/debugfs.h>
81 #include <linux/perf_event.h>
82 
83 #include <asm/asm.h>
84 #include <asm/branch.h>
85 #include <asm/byteorder.h>
86 #include <asm/cop2.h>
87 #include <asm/fpu.h>
88 #include <asm/fpu_emulator.h>
89 #include <asm/inst.h>
90 #include <asm/uaccess.h>
93 
94 #define STR(x)	__STR(x)
95 #define __STR(x)  #x
96 
97 enum {
98 	UNALIGNED_ACTION_QUIET,
99 	UNALIGNED_ACTION_SIGNAL,
100 	UNALIGNED_ACTION_SHOW,
101 };
102 #ifdef CONFIG_DEBUG_FS
103 static u32 unaligned_instructions;
104 static u32 unaligned_action;
105 #else
106 #define unaligned_action UNALIGNED_ACTION_QUIET
107 #endif
108 extern void show_registers(struct pt_regs *regs);
109 
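/*
 * All of the LoadHW/LoadW/StoreHW/StoreW-style macros below follow the
 * same pattern: the potentially faulting access instructions carry local
 * labels (1:, 2:), the result operand is cleared on success, and a .fixup
 * fragment at label 4: loads -EFAULT into the result and jumps back to 3:.
 * The __ex_table entries map each access instruction to that fixup, so a
 * fault inside the macro becomes an error code instead of an oops.
 *
 * Purely as an illustration (this is not code that is compiled here),
 * LoadHW on a big-endian CPU computes roughly:
 *
 *	const u8 *p = addr;
 *	value = ((s8)p[0] << 8) | p[1];		(a sign-extending halfword)
 *	res = 0;				(or -EFAULT if a byte faults)
 */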
110 #ifdef __BIG_ENDIAN
111 #define     LoadHW(addr, value, res)  \
112 		__asm__ __volatile__ (".set\tnoat\n"        \
113 			"1:\tlb\t%0, 0(%2)\n"               \
114 			"2:\tlbu\t$1, 1(%2)\n\t"            \
115 			"sll\t%0, 0x8\n\t"                  \
116 			"or\t%0, $1\n\t"                    \
117 			"li\t%1, 0\n"                       \
118 			"3:\t.set\tat\n\t"                  \
119 			".insn\n\t"                         \
120 			".section\t.fixup,\"ax\"\n\t"       \
121 			"4:\tli\t%1, %3\n\t"                \
122 			"j\t3b\n\t"                         \
123 			".previous\n\t"                     \
124 			".section\t__ex_table,\"a\"\n\t"    \
125 			STR(PTR)"\t1b, 4b\n\t"              \
126 			STR(PTR)"\t2b, 4b\n\t"              \
127 			".previous"                         \
128 			: "=&r" (value), "=r" (res)         \
129 			: "r" (addr), "i" (-EFAULT));
130 
131 #define     LoadW(addr, value, res)   \
132 		__asm__ __volatile__ (                      \
133 			"1:\tlwl\t%0, (%2)\n"               \
134 			"2:\tlwr\t%0, 3(%2)\n\t"            \
135 			"li\t%1, 0\n"                       \
136 			"3:\n\t"                            \
137 			".insn\n\t"                         \
138 			".section\t.fixup,\"ax\"\n\t"       \
139 			"4:\tli\t%1, %3\n\t"                \
140 			"j\t3b\n\t"                         \
141 			".previous\n\t"                     \
142 			".section\t__ex_table,\"a\"\n\t"    \
143 			STR(PTR)"\t1b, 4b\n\t"              \
144 			STR(PTR)"\t2b, 4b\n\t"              \
145 			".previous"                         \
146 			: "=&r" (value), "=r" (res)         \
147 			: "r" (addr), "i" (-EFAULT));
148 
149 #define     LoadHWU(addr, value, res) \
150 		__asm__ __volatile__ (                      \
151 			".set\tnoat\n"                      \
152 			"1:\tlbu\t%0, 0(%2)\n"              \
153 			"2:\tlbu\t$1, 1(%2)\n\t"            \
154 			"sll\t%0, 0x8\n\t"                  \
155 			"or\t%0, $1\n\t"                    \
156 			"li\t%1, 0\n"                       \
157 			"3:\n\t"                            \
158 			".insn\n\t"                         \
159 			".set\tat\n\t"                      \
160 			".section\t.fixup,\"ax\"\n\t"       \
161 			"4:\tli\t%1, %3\n\t"                \
162 			"j\t3b\n\t"                         \
163 			".previous\n\t"                     \
164 			".section\t__ex_table,\"a\"\n\t"    \
165 			STR(PTR)"\t1b, 4b\n\t"              \
166 			STR(PTR)"\t2b, 4b\n\t"              \
167 			".previous"                         \
168 			: "=&r" (value), "=r" (res)         \
169 			: "r" (addr), "i" (-EFAULT));
170 
171 #define     LoadWU(addr, value, res)  \
172 		__asm__ __volatile__ (                      \
173 			"1:\tlwl\t%0, (%2)\n"               \
174 			"2:\tlwr\t%0, 3(%2)\n\t"            \
175 			"dsll\t%0, %0, 32\n\t"              \
176 			"dsrl\t%0, %0, 32\n\t"              \
177 			"li\t%1, 0\n"                       \
178 			"3:\n\t"                            \
179 			".insn\n\t"                         \
180 			"\t.section\t.fixup,\"ax\"\n\t"     \
181 			"4:\tli\t%1, %3\n\t"                \
182 			"j\t3b\n\t"                         \
183 			".previous\n\t"                     \
184 			".section\t__ex_table,\"a\"\n\t"    \
185 			STR(PTR)"\t1b, 4b\n\t"              \
186 			STR(PTR)"\t2b, 4b\n\t"              \
187 			".previous"                         \
188 			: "=&r" (value), "=r" (res)         \
189 			: "r" (addr), "i" (-EFAULT));
190 
191 #define     LoadDW(addr, value, res)  \
192 		__asm__ __volatile__ (                      \
193 			"1:\tldl\t%0, (%2)\n"               \
194 			"2:\tldr\t%0, 7(%2)\n\t"            \
195 			"li\t%1, 0\n"                       \
196 			"3:\n\t"                            \
197 			".insn\n\t"                         \
198 			"\t.section\t.fixup,\"ax\"\n\t"     \
199 			"4:\tli\t%1, %3\n\t"                \
200 			"j\t3b\n\t"                         \
201 			".previous\n\t"                     \
202 			".section\t__ex_table,\"a\"\n\t"    \
203 			STR(PTR)"\t1b, 4b\n\t"              \
204 			STR(PTR)"\t2b, 4b\n\t"              \
205 			".previous"                         \
206 			: "=&r" (value), "=r" (res)         \
207 			: "r" (addr), "i" (-EFAULT));
208 
209 #define     StoreHW(addr, value, res) \
210 		__asm__ __volatile__ (                      \
211 			".set\tnoat\n"                      \
212 			"1:\tsb\t%1, 1(%2)\n\t"             \
213 			"srl\t$1, %1, 0x8\n"                \
214 			"2:\tsb\t$1, 0(%2)\n\t"             \
215 			".set\tat\n\t"                      \
216 			"li\t%0, 0\n"                       \
217 			"3:\n\t"                            \
218 			".insn\n\t"                         \
219 			".section\t.fixup,\"ax\"\n\t"       \
220 			"4:\tli\t%0, %3\n\t"                \
221 			"j\t3b\n\t"                         \
222 			".previous\n\t"                     \
223 			".section\t__ex_table,\"a\"\n\t"    \
224 			STR(PTR)"\t1b, 4b\n\t"              \
225 			STR(PTR)"\t2b, 4b\n\t"              \
226 			".previous"                         \
227 			: "=r" (res)                        \
228 			: "r" (value), "r" (addr), "i" (-EFAULT));
229 
230 #define     StoreW(addr, value, res)  \
231 		__asm__ __volatile__ (                      \
232 			"1:\tswl\t%1,(%2)\n"                \
233 			"2:\tswr\t%1, 3(%2)\n\t"            \
234 			"li\t%0, 0\n"                       \
235 			"3:\n\t"                            \
236 			".insn\n\t"                         \
237 			".section\t.fixup,\"ax\"\n\t"       \
238 			"4:\tli\t%0, %3\n\t"                \
239 			"j\t3b\n\t"                         \
240 			".previous\n\t"                     \
241 			".section\t__ex_table,\"a\"\n\t"    \
242 			STR(PTR)"\t1b, 4b\n\t"              \
243 			STR(PTR)"\t2b, 4b\n\t"              \
244 			".previous"                         \
245 		: "=r" (res)                                \
246 		: "r" (value), "r" (addr), "i" (-EFAULT));
247 
248 #define     StoreDW(addr, value, res) \
249 		__asm__ __volatile__ (                      \
250 			"1:\tsdl\t%1,(%2)\n"                \
251 			"2:\tsdr\t%1, 7(%2)\n\t"            \
252 			"li\t%0, 0\n"                       \
253 			"3:\n\t"                            \
254 			".insn\n\t"                         \
255 			".section\t.fixup,\"ax\"\n\t"       \
256 			"4:\tli\t%0, %3\n\t"                \
257 			"j\t3b\n\t"                         \
258 			".previous\n\t"                     \
259 			".section\t__ex_table,\"a\"\n\t"    \
260 			STR(PTR)"\t1b, 4b\n\t"              \
261 			STR(PTR)"\t2b, 4b\n\t"              \
262 			".previous"                         \
263 		: "=r" (res)                                \
264 		: "r" (value), "r" (addr), "i" (-EFAULT));
265 #endif
266 
267 #ifdef __LITTLE_ENDIAN
268 #define     LoadHW(addr, value, res)  \
269 		__asm__ __volatile__ (".set\tnoat\n"        \
270 			"1:\tlb\t%0, 1(%2)\n"               \
271 			"2:\tlbu\t$1, 0(%2)\n\t"            \
272 			"sll\t%0, 0x8\n\t"                  \
273 			"or\t%0, $1\n\t"                    \
274 			"li\t%1, 0\n"                       \
275 			"3:\t.set\tat\n\t"                  \
276 			".insn\n\t"                         \
277 			".section\t.fixup,\"ax\"\n\t"       \
278 			"4:\tli\t%1, %3\n\t"                \
279 			"j\t3b\n\t"                         \
280 			".previous\n\t"                     \
281 			".section\t__ex_table,\"a\"\n\t"    \
282 			STR(PTR)"\t1b, 4b\n\t"              \
283 			STR(PTR)"\t2b, 4b\n\t"              \
284 			".previous"                         \
285 			: "=&r" (value), "=r" (res)         \
286 			: "r" (addr), "i" (-EFAULT));
287 
288 #define     LoadW(addr, value, res)   \
289 		__asm__ __volatile__ (                      \
290 			"1:\tlwl\t%0, 3(%2)\n"              \
291 			"2:\tlwr\t%0, (%2)\n\t"             \
292 			"li\t%1, 0\n"                       \
293 			"3:\n\t"                            \
294 			".insn\n\t"                         \
295 			".section\t.fixup,\"ax\"\n\t"       \
296 			"4:\tli\t%1, %3\n\t"                \
297 			"j\t3b\n\t"                         \
298 			".previous\n\t"                     \
299 			".section\t__ex_table,\"a\"\n\t"    \
300 			STR(PTR)"\t1b, 4b\n\t"              \
301 			STR(PTR)"\t2b, 4b\n\t"              \
302 			".previous"                         \
303 			: "=&r" (value), "=r" (res)         \
304 			: "r" (addr), "i" (-EFAULT));
305 
306 #define     LoadHWU(addr, value, res) \
307 		__asm__ __volatile__ (                      \
308 			".set\tnoat\n"                      \
309 			"1:\tlbu\t%0, 1(%2)\n"              \
310 			"2:\tlbu\t$1, 0(%2)\n\t"            \
311 			"sll\t%0, 0x8\n\t"                  \
312 			"or\t%0, $1\n\t"                    \
313 			"li\t%1, 0\n"                       \
314 			"3:\n\t"                            \
315 			".insn\n\t"                         \
316 			".set\tat\n\t"                      \
317 			".section\t.fixup,\"ax\"\n\t"       \
318 			"4:\tli\t%1, %3\n\t"                \
319 			"j\t3b\n\t"                         \
320 			".previous\n\t"                     \
321 			".section\t__ex_table,\"a\"\n\t"    \
322 			STR(PTR)"\t1b, 4b\n\t"              \
323 			STR(PTR)"\t2b, 4b\n\t"              \
324 			".previous"                         \
325 			: "=&r" (value), "=r" (res)         \
326 			: "r" (addr), "i" (-EFAULT));
327 
328 #define     LoadWU(addr, value, res)  \
329 		__asm__ __volatile__ (                      \
330 			"1:\tlwl\t%0, 3(%2)\n"              \
331 			"2:\tlwr\t%0, (%2)\n\t"             \
332 			"dsll\t%0, %0, 32\n\t"              \
333 			"dsrl\t%0, %0, 32\n\t"              \
334 			"li\t%1, 0\n"                       \
335 			"3:\n\t"                            \
336 			".insn\n\t"                         \
337 			"\t.section\t.fixup,\"ax\"\n\t"     \
338 			"4:\tli\t%1, %3\n\t"                \
339 			"j\t3b\n\t"                         \
340 			".previous\n\t"                     \
341 			".section\t__ex_table,\"a\"\n\t"    \
342 			STR(PTR)"\t1b, 4b\n\t"              \
343 			STR(PTR)"\t2b, 4b\n\t"              \
344 			".previous"                         \
345 			: "=&r" (value), "=r" (res)         \
346 			: "r" (addr), "i" (-EFAULT));
347 
348 #define     LoadDW(addr, value, res)  \
349 		__asm__ __volatile__ (                      \
350 			"1:\tldl\t%0, 7(%2)\n"              \
351 			"2:\tldr\t%0, (%2)\n\t"             \
352 			"li\t%1, 0\n"                       \
353 			"3:\n\t"                            \
354 			".insn\n\t"                         \
355 			"\t.section\t.fixup,\"ax\"\n\t"     \
356 			"4:\tli\t%1, %3\n\t"                \
357 			"j\t3b\n\t"                         \
358 			".previous\n\t"                     \
359 			".section\t__ex_table,\"a\"\n\t"    \
360 			STR(PTR)"\t1b, 4b\n\t"              \
361 			STR(PTR)"\t2b, 4b\n\t"              \
362 			".previous"                         \
363 			: "=&r" (value), "=r" (res)         \
364 			: "r" (addr), "i" (-EFAULT));
365 
366 #define     StoreHW(addr, value, res) \
367 		__asm__ __volatile__ (                      \
368 			".set\tnoat\n"                      \
369 			"1:\tsb\t%1, 0(%2)\n\t"             \
370 			"srl\t$1,%1, 0x8\n"                 \
371 			"2:\tsb\t$1, 1(%2)\n\t"             \
372 			".set\tat\n\t"                      \
373 			"li\t%0, 0\n"                       \
374 			"3:\n\t"                            \
375 			".insn\n\t"                         \
376 			".section\t.fixup,\"ax\"\n\t"       \
377 			"4:\tli\t%0, %3\n\t"                \
378 			"j\t3b\n\t"                         \
379 			".previous\n\t"                     \
380 			".section\t__ex_table,\"a\"\n\t"    \
381 			STR(PTR)"\t1b, 4b\n\t"              \
382 			STR(PTR)"\t2b, 4b\n\t"              \
383 			".previous"                         \
384 			: "=r" (res)                        \
385 			: "r" (value), "r" (addr), "i" (-EFAULT));
386 
387 #define     StoreW(addr, value, res)  \
388 		__asm__ __volatile__ (                      \
389 			"1:\tswl\t%1, 3(%2)\n"              \
390 			"2:\tswr\t%1, (%2)\n\t"             \
391 			"li\t%0, 0\n"                       \
392 			"3:\n\t"                            \
393 			".insn\n\t"                         \
394 			".section\t.fixup,\"ax\"\n\t"       \
395 			"4:\tli\t%0, %3\n\t"                \
396 			"j\t3b\n\t"                         \
397 			".previous\n\t"                     \
398 			".section\t__ex_table,\"a\"\n\t"    \
399 			STR(PTR)"\t1b, 4b\n\t"              \
400 			STR(PTR)"\t2b, 4b\n\t"              \
401 			".previous"                         \
402 		: "=r" (res)                                \
403 		: "r" (value), "r" (addr), "i" (-EFAULT));
404 
405 #define     StoreDW(addr, value, res) \
406 		__asm__ __volatile__ (                      \
407 			"1:\tsdl\t%1, 7(%2)\n"              \
408 			"2:\tsdr\t%1, (%2)\n\t"             \
409 			"li\t%0, 0\n"                       \
410 			"3:\n\t"                            \
411 			".insn\n\t"                         \
412 			".section\t.fixup,\"ax\"\n\t"       \
413 			"4:\tli\t%0, %3\n\t"                \
414 			"j\t3b\n\t"                         \
415 			".previous\n\t"                     \
416 			".section\t__ex_table,\"a\"\n\t"    \
417 			STR(PTR)"\t1b, 4b\n\t"              \
418 			STR(PTR)"\t2b, 4b\n\t"              \
419 			".previous"                         \
420 		: "=r" (res)                                \
421 		: "r" (value), "r" (addr), "i" (-EFAULT));
422 #endif
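/*
 * A worked example of how the lwl/lwr pairs above reassemble an unaligned
 * word (sketched for the little-endian variant, the big-endian case is the
 * mirror image): for bytes p[0]..p[3] at addr..addr+3, "lwr %0, (addr)"
 * fills the low end of the register from the aligned word containing addr,
 * and "lwl %0, 3(addr)" fills the high end from the aligned word containing
 * addr+3.  With p[0..3] = 0x11, 0x22, 0x33, 0x44 the result is 0x44332211,
 * exactly what an aligned lw would have returned.  The two halves touch at
 * most two aligned words and can fault independently (e.g. across a page
 * boundary), which is why each instruction gets its own __ex_table entry.
 * The swl/swr, ldl/ldr and sdl/sdr pairs work the same way for stores and
 * doublewords.
 */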
423 
424 static void emulate_load_store_insn(struct pt_regs *regs,
425 	void __user *addr, unsigned int __user *pc)
426 {
427 	union mips_instruction insn;
428 	unsigned long value;
429 	unsigned int res;
430 	unsigned long origpc;
431 	unsigned long orig31;
432 	void __user *fault_addr = NULL;
433 
434 	origpc = (unsigned long)pc;
435 	orig31 = regs->regs[31];
436 
437 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
438 
439 	/*
440 	 * This load never faults.
441 	 */
442 	__get_user(insn.word, pc);
443 
444 	switch (insn.i_format.opcode) {
445 		/*
446 		 * These are instructions that a compiler doesn't generate.  We
447 		 * can therefore assume that the code is MIPS-aware and
448 		 * really buggy.  Emulating these instructions would break the
449 		 * semantics anyway.
450 		 */
451 	case ll_op:
452 	case lld_op:
453 	case sc_op:
454 	case scd_op:
455 
456 		/*
457 		 * For these instructions the only way to create an address
458 		 * error is an attempted access to kernel/supervisor address
459 		 * space.
460 		 */
461 	case ldl_op:
462 	case ldr_op:
463 	case lwl_op:
464 	case lwr_op:
465 	case sdl_op:
466 	case sdr_op:
467 	case swl_op:
468 	case swr_op:
469 	case lb_op:
470 	case lbu_op:
471 	case sb_op:
472 		goto sigbus;
473 
474 		/*
475 		 * The remaining opcodes are the ones that are really of
476 		 * interest.
477 		 */
478 	case lh_op:
479 		if (!access_ok(VERIFY_READ, addr, 2))
480 			goto sigbus;
481 
482 		LoadHW(addr, value, res);
483 		if (res)
484 			goto fault;
485 		compute_return_epc(regs);
486 		regs->regs[insn.i_format.rt] = value;
487 		break;
488 
489 	case lw_op:
490 		if (!access_ok(VERIFY_READ, addr, 4))
491 			goto sigbus;
492 
493 		LoadW(addr, value, res);
494 		if (res)
495 			goto fault;
496 		compute_return_epc(regs);
497 		regs->regs[insn.i_format.rt] = value;
498 		break;
499 
500 	case lhu_op:
501 		if (!access_ok(VERIFY_READ, addr, 2))
502 			goto sigbus;
503 
504 		LoadHWU(addr, value, res);
505 		if (res)
506 			goto fault;
507 		compute_return_epc(regs);
508 		regs->regs[insn.i_format.rt] = value;
509 		break;
510 
511 	case lwu_op:
512 #ifdef CONFIG_64BIT
513 		/*
514 		 * A 32-bit kernel might be running on a 64-bit processor.  But
515 		 * if we're on a 32-bit processor and an i-cache incoherency
516 		 * or race makes us see a 64-bit instruction here the sdl/sdr
517 		 * would blow up, so for now we don't handle unaligned 64-bit
518 		 * instructions on 32-bit kernels.
519 		 */
520 		if (!access_ok(VERIFY_READ, addr, 4))
521 			goto sigbus;
522 
523 		LoadWU(addr, value, res);
524 		if (res)
525 			goto fault;
526 		compute_return_epc(regs);
527 		regs->regs[insn.i_format.rt] = value;
528 		break;
529 #endif /* CONFIG_64BIT */
530 
531 		/* Cannot handle 64-bit instructions in 32-bit kernel */
532 		goto sigill;
533 
534 	case ld_op:
535 #ifdef CONFIG_64BIT
536 		/*
537 		 * A 32-bit kernel might be running on a 64-bit processor.  But
538 		 * if we're on a 32-bit processor and an i-cache incoherency
539 		 * or race makes us see a 64-bit instruction here the sdl/sdr
540 		 * would blow up, so for now we don't handle unaligned 64-bit
541 		 * instructions on 32-bit kernels.
542 		 */
543 		if (!access_ok(VERIFY_READ, addr, 8))
544 			goto sigbus;
545 
546 		LoadDW(addr, value, res);
547 		if (res)
548 			goto fault;
549 		compute_return_epc(regs);
550 		regs->regs[insn.i_format.rt] = value;
551 		break;
552 #endif /* CONFIG_64BIT */
553 
554 		/* Cannot handle 64-bit instructions in 32-bit kernel */
555 		goto sigill;
556 
557 	case sh_op:
558 		if (!access_ok(VERIFY_WRITE, addr, 2))
559 			goto sigbus;
560 
561 		compute_return_epc(regs);
562 		value = regs->regs[insn.i_format.rt];
563 		StoreHW(addr, value, res);
564 		if (res)
565 			goto fault;
566 		break;
567 
568 	case sw_op:
569 		if (!access_ok(VERIFY_WRITE, addr, 4))
570 			goto sigbus;
571 
572 		compute_return_epc(regs);
573 		value = regs->regs[insn.i_format.rt];
574 		StoreW(addr, value, res);
575 		if (res)
576 			goto fault;
577 		break;
578 
579 	case sd_op:
580 #ifdef CONFIG_64BIT
581 		/*
582 		 * A 32-bit kernel might be running on a 64-bit processor.  But
583 		 * if we're on a 32-bit processor and an i-cache incoherency
584 		 * or race makes us see a 64-bit instruction here the sdl/sdr
585 		 * would blow up, so for now we don't handle unaligned 64-bit
586 		 * instructions on 32-bit kernels.
587 		 */
588 		if (!access_ok(VERIFY_WRITE, addr, 8))
589 			goto sigbus;
590 
591 		compute_return_epc(regs);
592 		value = regs->regs[insn.i_format.rt];
593 		StoreDW(addr, value, res);
594 		if (res)
595 			goto fault;
596 		break;
597 #endif /* CONFIG_64BIT */
598 
599 		/* Cannot handle 64-bit instructions in 32-bit kernel */
600 		goto sigill;
601 
602 	case lwc1_op:
603 	case ldc1_op:
604 	case swc1_op:
605 	case sdc1_op:
606 		die_if_kernel("Unaligned FP access in kernel code", regs);
607 		BUG_ON(!used_math());
608 		BUG_ON(!is_fpu_owner());
609 
610 		lose_fpu(1);	/* Save FPU state for the emulator. */
611 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
612 					       &fault_addr);
613 		own_fpu(1);	/* Restore FPU state. */
614 
615 		/* Signal if something went wrong. */
616 		process_fpemu_return(res, fault_addr);
617 
618 		if (res == 0)
619 			break;
620 		return;
621 
622 	/*
623 	 * COP2 is available to the implementor for application-specific use.
624 	 * It's up to applications to register a notifier chain and do
625 	 * whatever they have to do, including possibly sending signals.
626 	 */
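	/*
	 * Illustrative only, not part of this handler: a platform would
	 * typically hook these events with the cu2_notifier() helper from
	 * <asm/cop2.h>, roughly along these lines (the function name and
	 * body are hypothetical):
	 *
	 *	static int my_cu2_call(struct notifier_block *nfb,
	 *			       unsigned long action, void *data)
	 *	{
	 *		struct pt_regs *regs = data;
	 *
	 *		switch (action) {
	 *		case CU2_LWC2_OP:
	 *		case CU2_SWC2_OP:
	 *			(emulate the access or signal the task via regs)
	 *			return NOTIFY_STOP;
	 *		}
	 *		return NOTIFY_OK;
	 *	}
	 *
	 *	cu2_notifier(my_cu2_call, 0);
	 */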
627 	case lwc2_op:
628 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
629 		break;
630 
631 	case ldc2_op:
632 		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
633 		break;
634 
635 	case swc2_op:
636 		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
637 		break;
638 
639 	case sdc2_op:
640 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
641 		break;
642 
643 	default:
644 		/*
645 		 * Pheeee...  We encountered an as yet unknown instruction or
646 		 * cache coherence problem.  Die sucker, die ...
647 		 */
648 		goto sigill;
649 	}
650 
651 #ifdef CONFIG_DEBUG_FS
652 	unaligned_instructions++;
653 #endif
654 
655 	return;
656 
657 fault:
658 	/* roll back jump/branch */
659 	regs->cp0_epc = origpc;
660 	regs->regs[31] = orig31;
661 	/* Did we have an exception handler installed? */
662 	if (fixup_exception(regs))
663 		return;
664 
665 	die_if_kernel("Unhandled kernel unaligned access", regs);
666 	force_sig(SIGSEGV, current);
667 
668 	return;
669 
670 sigbus:
671 	die_if_kernel("Unhandled kernel unaligned access", regs);
672 	force_sig(SIGBUS, current);
673 
674 	return;
675 
676 sigill:
677 	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
678 		      regs);
679 	force_sig(SIGILL, current);
680 }
681 
682 /* Recode table from 16-bit register notation to 32-bit GPR. */
683 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
684 
685 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
686 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
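/*
 * The two tables differ only in slot 0: a 3-bit register field of 0 names
 * $16 (s0) in most 16-bit encodings, but the 16-bit store encodings use it
 * to name $0, presumably so a store can write the constant zero directly.
 */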
687 
688 static void emulate_load_store_microMIPS(struct pt_regs *regs,
689 					 void __user *addr)
690 {
691 	unsigned long value;
692 	unsigned int res;
693 	int i;
694 	unsigned int reg = 0, rvar;
695 	unsigned long orig31;
696 	u16 __user *pc16;
697 	u16 halfword;
698 	unsigned int word;
699 	unsigned long origpc, contpc;
700 	union mips_instruction insn;
701 	struct mm_decoded_insn mminsn;
702 	void __user *fault_addr = NULL;
703 
704 	origpc = regs->cp0_epc;
705 	orig31 = regs->regs[31];
706 
707 	mminsn.micro_mips_mode = 1;
708 
709 	/*
710 	 * This load never faults.
711 	 */
712 	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
713 	__get_user(halfword, pc16);
714 	pc16++;
715 	contpc = regs->cp0_epc + 2;
716 	word = ((unsigned int)halfword << 16);
717 	mminsn.pc_inc = 2;
718 
719 	if (!mm_insn_16bit(halfword)) {
720 		__get_user(halfword, pc16);
721 		pc16++;
722 		contpc = regs->cp0_epc + 4;
723 		mminsn.pc_inc = 4;
724 		word |= halfword;
725 	}
726 	mminsn.insn = word;
727 
728 	if (get_user(halfword, pc16))
729 		goto fault;
730 	mminsn.next_pc_inc = 2;
731 	word = ((unsigned int)halfword << 16);
732 
733 	if (!mm_insn_16bit(halfword)) {
734 		pc16++;
735 		if (get_user(halfword, pc16))
736 			goto fault;
737 		mminsn.next_pc_inc = 4;
738 		word |= halfword;
739 	}
740 	mminsn.next_insn = word;
741 
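	/*
	 * If the instruction at the EPC is a branch or jump, the faulting
	 * access actually lives in its delay slot: mm_isBranchInstr() then
	 * computes the branch target as the continuation PC and we decode
	 * mminsn.next_insn instead of mminsn.insn below.
	 */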
742 	insn = (union mips_instruction)(mminsn.insn);
743 	if (mm_isBranchInstr(regs, mminsn, &contpc))
744 		insn = (union mips_instruction)(mminsn.next_insn);
745 
746 	/*  Parse instruction to find what to do */
747 
748 	switch (insn.mm_i_format.opcode) {
749 
750 	case mm_pool32a_op:
751 		switch (insn.mm_x_format.func) {
752 		case mm_lwxs_op:
753 			reg = insn.mm_x_format.rd;
754 			goto loadW;
755 		}
756 
757 		goto sigbus;
758 
759 	case mm_pool32b_op:
760 		switch (insn.mm_m_format.func) {
761 		case mm_lwp_func:
762 			reg = insn.mm_m_format.rd;
763 			if (reg == 31)
764 				goto sigbus;
765 
766 			if (!access_ok(VERIFY_READ, addr, 8))
767 				goto sigbus;
768 
769 			LoadW(addr, value, res);
770 			if (res)
771 				goto fault;
772 			regs->regs[reg] = value;
773 			addr += 4;
774 			LoadW(addr, value, res);
775 			if (res)
776 				goto fault;
777 			regs->regs[reg + 1] = value;
778 			goto success;
779 
780 		case mm_swp_func:
781 			reg = insn.mm_m_format.rd;
782 			if (reg == 31)
783 				goto sigbus;
784 
785 			if (!access_ok(VERIFY_WRITE, addr, 8))
786 				goto sigbus;
787 
788 			value = regs->regs[reg];
789 			StoreW(addr, value, res);
790 			if (res)
791 				goto fault;
792 			addr += 4;
793 			value = regs->regs[reg + 1];
794 			StoreW(addr, value, res);
795 			if (res)
796 				goto fault;
797 			goto success;
798 
799 		case mm_ldp_func:
800 #ifdef CONFIG_64BIT
801 			reg = insn.mm_m_format.rd;
802 			if (reg == 31)
803 				goto sigbus;
804 
805 			if (!access_ok(VERIFY_READ, addr, 16))
806 				goto sigbus;
807 
808 			LoadDW(addr, value, res);
809 			if (res)
810 				goto fault;
811 			regs->regs[reg] = value;
812 			addr += 8;
813 			LoadDW(addr, value, res);
814 			if (res)
815 				goto fault;
816 			regs->regs[reg + 1] = value;
817 			goto success;
818 #endif /* CONFIG_64BIT */
819 
820 			goto sigill;
821 
822 		case mm_sdp_func:
823 #ifdef CONFIG_64BIT
824 			reg = insn.mm_m_format.rd;
825 			if (reg == 31)
826 				goto sigbus;
827 
828 			if (!access_ok(VERIFY_WRITE, addr, 16))
829 				goto sigbus;
830 
831 			value = regs->regs[reg];
832 			StoreDW(addr, value, res);
833 			if (res)
834 				goto fault;
835 			addr += 8;
836 			value = regs->regs[reg + 1];
837 			StoreDW(addr, value, res);
838 			if (res)
839 				goto fault;
840 			goto success;
841 #endif /* CONFIG_64BIT */
842 
843 			goto sigill;
844 
845 		case mm_lwm32_func:
846 			reg = insn.mm_m_format.rd;
847 			rvar = reg & 0xf;
848 			if ((rvar > 9) || !reg)
849 				goto sigill;
850 			if (reg & 0x10) {
851 				if (!access_ok(VERIFY_READ, addr,
852 					       4 * (rvar + 1)))
853 					goto sigbus;
854 			} else {
855 				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
856 					goto sigbus;
857 			}
858 			if (rvar == 9)
859 				rvar = 8;
860 			for (i = 16; rvar; rvar--, i++) {
861 				LoadW(addr, value, res);
862 				if (res)
863 					goto fault;
864 				addr += 4;
865 				regs->regs[i] = value;
866 			}
867 			if ((reg & 0xf) == 9) {
868 				LoadW(addr, value, res);
869 				if (res)
870 					goto fault;
871 				addr += 4;
872 				regs->regs[30] = value;
873 			}
874 			if (reg & 0x10) {
875 				LoadW(addr, value, res);
876 				if (res)
877 					goto fault;
878 				regs->regs[31] = value;
879 			}
880 			goto success;
881 
882 		case mm_swm32_func:
883 			reg = insn.mm_m_format.rd;
884 			rvar = reg & 0xf;
885 			if ((rvar > 9) || !reg)
886 				goto sigill;
887 			if (reg & 0x10) {
888 				if (!access_ok(VERIFY_WRITE, addr,
889 					       4 * (rvar + 1)))
890 					goto sigbus;
891 			} else {
892 				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
893 					goto sigbus;
894 			}
895 			if (rvar == 9)
896 				rvar = 8;
897 			for (i = 16; rvar; rvar--, i++) {
898 				value = regs->regs[i];
899 				StoreW(addr, value, res);
900 				if (res)
901 					goto fault;
902 				addr += 4;
903 			}
904 			if ((reg & 0xf) == 9) {
905 				value = regs->regs[30];
906 				StoreW(addr, value, res);
907 				if (res)
908 					goto fault;
909 				addr += 4;
910 			}
911 			if (reg & 0x10) {
912 				value = regs->regs[31];
913 				StoreW(addr, value, res);
914 				if (res)
915 					goto fault;
916 			}
917 			goto success;
918 
919 		case mm_ldm_func:
920 #ifdef CONFIG_64BIT
921 			reg = insn.mm_m_format.rd;
922 			rvar = reg & 0xf;
923 			if ((rvar > 9) || !reg)
924 				goto sigill;
925 			if (reg & 0x10) {
926 				if (!access_ok(VERIFY_READ, addr,
927 					       8 * (rvar + 1)))
928 					goto sigbus;
929 			} else {
930 				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
931 					goto sigbus;
932 			}
933 			if (rvar == 9)
934 				rvar = 8;
935 
936 			for (i = 16; rvar; rvar--, i++) {
937 				LoadDW(addr, value, res);
938 				if (res)
939 					goto fault;
940 				addr += 8;
941 				regs->regs[i] = value;
942 			}
943 			if ((reg & 0xf) == 9) {
944 				LoadDW(addr, value, res);
945 				if (res)
946 					goto fault;
947 				addr += 8;
948 				regs->regs[30] = value;
949 			}
950 			if (reg & 0x10) {
951 				LoadDW(addr, value, res);
952 				if (res)
953 					goto fault;
954 				regs->regs[31] = value;
955 			}
956 			goto success;
957 #endif /* CONFIG_64BIT */
958 
959 			goto sigill;
960 
961 		case mm_sdm_func:
962 #ifdef CONFIG_64BIT
963 			reg = insn.mm_m_format.rd;
964 			rvar = reg & 0xf;
965 			if ((rvar > 9) || !reg)
966 				goto sigill;
967 			if (reg & 0x10) {
968 				if (!access_ok(VERIFY_WRITE, addr,
969 					       8 * (rvar + 1)))
970 					goto sigbus;
971 			} else {
972 				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
973 					goto sigbus;
974 			}
975 			if (rvar == 9)
976 				rvar = 8;
977 
978 			for (i = 16; rvar; rvar--, i++) {
979 				value = regs->regs[i];
980 				StoreDW(addr, value, res);
981 				if (res)
982 					goto fault;
983 				addr += 8;
984 			}
985 			if ((reg & 0xf) == 9) {
986 				value = regs->regs[30];
987 				StoreDW(addr, value, res);
988 				if (res)
989 					goto fault;
990 				addr += 8;
991 			}
992 			if (reg & 0x10) {
993 				value = regs->regs[31];
994 				StoreDW(addr, value, res);
995 				if (res)
996 					goto fault;
997 			}
998 			goto success;
999 #endif /* CONFIG_64BIT */
1000 
1001 			goto sigill;
1002 
1003 			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
1004 		}
1005 
1006 		goto sigbus;
1007 
1008 	case mm_pool32c_op:
1009 		switch (insn.mm_m_format.func) {
1010 		case mm_lwu_func:
1011 			reg = insn.mm_m_format.rd;
1012 			goto loadWU;
1013 		}
1014 
1015 		/*  LL,SC,LLD,SCD are not serviced */
1016 		goto sigbus;
1017 
1018 	case mm_pool32f_op:
1019 		switch (insn.mm_x_format.func) {
1020 		case mm_lwxc1_func:
1021 		case mm_swxc1_func:
1022 		case mm_ldxc1_func:
1023 		case mm_sdxc1_func:
1024 			goto fpu_emul;
1025 		}
1026 
1027 		goto sigbus;
1028 
1029 	case mm_ldc132_op:
1030 	case mm_sdc132_op:
1031 	case mm_lwc132_op:
1032 	case mm_swc132_op:
1033 fpu_emul:
1034 		/* roll back jump/branch */
1035 		regs->cp0_epc = origpc;
1036 		regs->regs[31] = orig31;
1037 
1038 		die_if_kernel("Unaligned FP access in kernel code", regs);
1039 		BUG_ON(!used_math());
1040 		BUG_ON(!is_fpu_owner());
1041 
1042 		lose_fpu(1);	/* save the FPU state for the emulator */
1043 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1044 					       &fault_addr);
1045 		own_fpu(1);	/* restore FPU state */
1046 
1047 		/* If something went wrong, signal */
1048 		process_fpemu_return(res, fault_addr);
1049 
1050 		if (res == 0)
1051 			goto success;
1052 		return;
1053 
1054 	case mm_lh32_op:
1055 		reg = insn.mm_i_format.rt;
1056 		goto loadHW;
1057 
1058 	case mm_lhu32_op:
1059 		reg = insn.mm_i_format.rt;
1060 		goto loadHWU;
1061 
1062 	case mm_lw32_op:
1063 		reg = insn.mm_i_format.rt;
1064 		goto loadW;
1065 
1066 	case mm_sh32_op:
1067 		reg = insn.mm_i_format.rt;
1068 		goto storeHW;
1069 
1070 	case mm_sw32_op:
1071 		reg = insn.mm_i_format.rt;
1072 		goto storeW;
1073 
1074 	case mm_ld32_op:
1075 		reg = insn.mm_i_format.rt;
1076 		goto loadDW;
1077 
1078 	case mm_sd32_op:
1079 		reg = insn.mm_i_format.rt;
1080 		goto storeDW;
1081 
1082 	case mm_pool16c_op:
1083 		switch (insn.mm16_m_format.func) {
1084 		case mm_lwm16_op:
1085 			reg = insn.mm16_m_format.rlist;
1086 			rvar = reg + 1;
1087 			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1088 				goto sigbus;
1089 
1090 			for (i = 16; rvar; rvar--, i++) {
1091 				LoadW(addr, value, res);
1092 				if (res)
1093 					goto fault;
1094 				addr += 4;
1095 				regs->regs[i] = value;
1096 			}
1097 			LoadW(addr, value, res);
1098 			if (res)
1099 				goto fault;
1100 			regs->regs[31] = value;
1101 
1102 			goto success;
1103 
1104 		case mm_swm16_op:
1105 			reg = insn.mm16_m_format.rlist;
1106 			rvar = reg + 1;
1107 			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1108 				goto sigbus;
1109 
1110 			for (i = 16; rvar; rvar--, i++) {
1111 				value = regs->regs[i];
1112 				StoreW(addr, value, res);
1113 				if (res)
1114 					goto fault;
1115 				addr += 4;
1116 			}
1117 			value = regs->regs[31];
1118 			StoreW(addr, value, res);
1119 			if (res)
1120 				goto fault;
1121 
1122 			goto success;
1123 
1124 		}
1125 
1126 		goto sigbus;
1127 
1128 	case mm_lhu16_op:
1129 		reg = reg16to32[insn.mm16_rb_format.rt];
1130 		goto loadHWU;
1131 
1132 	case mm_lw16_op:
1133 		reg = reg16to32[insn.mm16_rb_format.rt];
1134 		goto loadW;
1135 
1136 	case mm_sh16_op:
1137 		reg = reg16to32st[insn.mm16_rb_format.rt];
1138 		goto storeHW;
1139 
1140 	case mm_sw16_op:
1141 		reg = reg16to32st[insn.mm16_rb_format.rt];
1142 		goto storeW;
1143 
1144 	case mm_lwsp16_op:
1145 		reg = insn.mm16_r5_format.rt;
1146 		goto loadW;
1147 
1148 	case mm_swsp16_op:
1149 		reg = insn.mm16_r5_format.rt;
1150 		goto storeW;
1151 
1152 	case mm_lwgp16_op:
1153 		reg = reg16to32[insn.mm16_r3_format.rt];
1154 		goto loadW;
1155 
1156 	default:
1157 		goto sigill;
1158 	}
1159 
1160 loadHW:
1161 	if (!access_ok(VERIFY_READ, addr, 2))
1162 		goto sigbus;
1163 
1164 	LoadHW(addr, value, res);
1165 	if (res)
1166 		goto fault;
1167 	regs->regs[reg] = value;
1168 	goto success;
1169 
1170 loadHWU:
1171 	if (!access_ok(VERIFY_READ, addr, 2))
1172 		goto sigbus;
1173 
1174 	LoadHWU(addr, value, res);
1175 	if (res)
1176 		goto fault;
1177 	regs->regs[reg] = value;
1178 	goto success;
1179 
1180 loadW:
1181 	if (!access_ok(VERIFY_READ, addr, 4))
1182 		goto sigbus;
1183 
1184 	LoadW(addr, value, res);
1185 	if (res)
1186 		goto fault;
1187 	regs->regs[reg] = value;
1188 	goto success;
1189 
1190 loadWU:
1191 #ifdef CONFIG_64BIT
1192 	/*
1193 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1194 	 * if we're on a 32-bit processor and an i-cache incoherency
1195 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1196 	 * would blow up, so for now we don't handle unaligned 64-bit
1197 	 * instructions on 32-bit kernels.
1198 	 */
1199 	if (!access_ok(VERIFY_READ, addr, 4))
1200 		goto sigbus;
1201 
1202 	LoadWU(addr, value, res);
1203 	if (res)
1204 		goto fault;
1205 	regs->regs[reg] = value;
1206 	goto success;
1207 #endif /* CONFIG_64BIT */
1208 
1209 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1210 	goto sigill;
1211 
1212 loadDW:
1213 #ifdef CONFIG_64BIT
1214 	/*
1215 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1216 	 * if we're on a 32-bit processor and an i-cache incoherency
1217 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1218 	 * would blow up, so for now we don't handle unaligned 64-bit
1219 	 * instructions on 32-bit kernels.
1220 	 */
1221 	if (!access_ok(VERIFY_READ, addr, 8))
1222 		goto sigbus;
1223 
1224 	LoadDW(addr, value, res);
1225 	if (res)
1226 		goto fault;
1227 	regs->regs[reg] = value;
1228 	goto success;
1229 #endif /* CONFIG_64BIT */
1230 
1231 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1232 	goto sigill;
1233 
1234 storeHW:
1235 	if (!access_ok(VERIFY_WRITE, addr, 2))
1236 		goto sigbus;
1237 
1238 	value = regs->regs[reg];
1239 	StoreHW(addr, value, res);
1240 	if (res)
1241 		goto fault;
1242 	goto success;
1243 
1244 storeW:
1245 	if (!access_ok(VERIFY_WRITE, addr, 4))
1246 		goto sigbus;
1247 
1248 	value = regs->regs[reg];
1249 	StoreW(addr, value, res);
1250 	if (res)
1251 		goto fault;
1252 	goto success;
1253 
1254 storeDW:
1255 #ifdef CONFIG_64BIT
1256 	/*
1257 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1258 	 * if we're on a 32-bit processor and an i-cache incoherency
1259 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1260 	 * would blow up, so for now we don't handle unaligned 64-bit
1261 	 * instructions on 32-bit kernels.
1262 	 */
1263 	if (!access_ok(VERIFY_WRITE, addr, 8))
1264 		goto sigbus;
1265 
1266 	value = regs->regs[reg];
1267 	StoreDW(addr, value, res);
1268 	if (res)
1269 		goto fault;
1270 	goto success;
1271 #endif /* CONFIG_64BIT */
1272 
1273 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1274 	goto sigill;
1275 
1276 success:
1277 	regs->cp0_epc = contpc;	/* advance or branch */
1278 
1279 #ifdef CONFIG_DEBUG_FS
1280 	unaligned_instructions++;
1281 #endif
1282 	return;
1283 
1284 fault:
1285 	/* roll back jump/branch */
1286 	regs->cp0_epc = origpc;
1287 	regs->regs[31] = orig31;
1288 	/* Did we have an exception handler installed? */
1289 	if (fixup_exception(regs))
1290 		return;
1291 
1292 	die_if_kernel("Unhandled kernel unaligned access", regs);
1293 	force_sig(SIGSEGV, current);
1294 
1295 	return;
1296 
1297 sigbus:
1298 	die_if_kernel("Unhandled kernel unaligned access", regs);
1299 	force_sig(SIGBUS, current);
1300 
1301 	return;
1302 
1303 sigill:
1304 	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
1305 		      regs);
1306 	force_sig(SIGILL, current);
1307 }
1308 
1309 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
1310 {
1311 	unsigned long value;
1312 	unsigned int res;
1313 	int reg;
1314 	unsigned long orig31;
1315 	u16 __user *pc16;
1316 	unsigned long origpc;
1317 	union mips16e_instruction mips16inst, oldinst;
1318 
1319 	origpc = regs->cp0_epc;
1320 	orig31 = regs->regs[31];
1321 	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1322 	/*
1323 	 * This load never faults.
1324 	 */
1325 	__get_user(mips16inst.full, pc16);
1326 	oldinst = mips16inst;
1327 
1328 	/* skip EXTEND instruction */
1329 	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1330 		pc16++;
1331 		__get_user(mips16inst.full, pc16);
1332 	} else if (delay_slot(regs)) {
1333 		/*  skip jump instructions */
1334 		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
1335 		if (mips16inst.ri.opcode == MIPS16e_jal_op)
1336 			pc16++;
1337 		pc16++;
1338 		if (get_user(mips16inst.full, pc16))
1339 			goto sigbus;
1340 	}
1341 
1342 	switch (mips16inst.ri.opcode) {
1343 	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
1344 		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
1345 		case MIPS16e_ldpc_func:
1346 		case MIPS16e_ldsp_func:
1347 			reg = reg16to32[mips16inst.ri64.ry];
1348 			goto loadDW;
1349 
1350 		case MIPS16e_sdsp_func:
1351 			reg = reg16to32[mips16inst.ri64.ry];
1352 			goto writeDW;
1353 
1354 		case MIPS16e_sdrasp_func:
1355 			reg = 29;	/* GPRSP */
1356 			goto writeDW;
1357 		}
1358 
1359 		goto sigbus;
1360 
1361 	case MIPS16e_swsp_op:
1362 	case MIPS16e_lwpc_op:
1363 	case MIPS16e_lwsp_op:
1364 		reg = reg16to32[mips16inst.ri.rx];
1365 		break;
1366 
1367 	case MIPS16e_i8_op:
1368 		if (mips16inst.i8.func != MIPS16e_swrasp_func)
1369 			goto sigbus;
1370 		reg = 29;	/* GPRSP */
1371 		break;
1372 
1373 	default:
1374 		reg = reg16to32[mips16inst.rri.ry];
1375 		break;
1376 	}
1377 
1378 	switch (mips16inst.ri.opcode) {
1379 
1380 	case MIPS16e_lb_op:
1381 	case MIPS16e_lbu_op:
1382 	case MIPS16e_sb_op:
1383 		goto sigbus;
1384 
1385 	case MIPS16e_lh_op:
1386 		if (!access_ok(VERIFY_READ, addr, 2))
1387 			goto sigbus;
1388 
1389 		LoadHW(addr, value, res);
1390 		if (res)
1391 			goto fault;
1392 		MIPS16e_compute_return_epc(regs, &oldinst);
1393 		regs->regs[reg] = value;
1394 		break;
1395 
1396 	case MIPS16e_lhu_op:
1397 		if (!access_ok(VERIFY_READ, addr, 2))
1398 			goto sigbus;
1399 
1400 		LoadHWU(addr, value, res);
1401 		if (res)
1402 			goto fault;
1403 		MIPS16e_compute_return_epc(regs, &oldinst);
1404 		regs->regs[reg] = value;
1405 		break;
1406 
1407 	case MIPS16e_lw_op:
1408 	case MIPS16e_lwpc_op:
1409 	case MIPS16e_lwsp_op:
1410 		if (!access_ok(VERIFY_READ, addr, 4))
1411 			goto sigbus;
1412 
1413 		LoadW(addr, value, res);
1414 		if (res)
1415 			goto fault;
1416 		MIPS16e_compute_return_epc(regs, &oldinst);
1417 		regs->regs[reg] = value;
1418 		break;
1419 
1420 	case MIPS16e_lwu_op:
1421 #ifdef CONFIG_64BIT
1422 		/*
1423 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1424 		 * if we're on a 32-bit processor and an i-cache incoherency
1425 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1426 		 * would blow up, so for now we don't handle unaligned 64-bit
1427 		 * instructions on 32-bit kernels.
1428 		 */
1429 		if (!access_ok(VERIFY_READ, addr, 4))
1430 			goto sigbus;
1431 
1432 		LoadWU(addr, value, res);
1433 		if (res)
1434 			goto fault;
1435 		MIPS16e_compute_return_epc(regs, &oldinst);
1436 		regs->regs[reg] = value;
1437 		break;
1438 #endif /* CONFIG_64BIT */
1439 
1440 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1441 		goto sigill;
1442 
1443 	case MIPS16e_ld_op:
1444 loadDW:
1445 #ifdef CONFIG_64BIT
1446 		/*
1447 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1448 		 * if we're on a 32-bit processor and an i-cache incoherency
1449 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1450 		 * would blow up, so for now we don't handle unaligned 64-bit
1451 		 * instructions on 32-bit kernels.
1452 		 */
1453 		if (!access_ok(VERIFY_READ, addr, 8))
1454 			goto sigbus;
1455 
1456 		LoadDW(addr, value, res);
1457 		if (res)
1458 			goto fault;
1459 		MIPS16e_compute_return_epc(regs, &oldinst);
1460 		regs->regs[reg] = value;
1461 		break;
1462 #endif /* CONFIG_64BIT */
1463 
1464 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1465 		goto sigill;
1466 
1467 	case MIPS16e_sh_op:
1468 		if (!access_ok(VERIFY_WRITE, addr, 2))
1469 			goto sigbus;
1470 
1471 		MIPS16e_compute_return_epc(regs, &oldinst);
1472 		value = regs->regs[reg];
1473 		StoreHW(addr, value, res);
1474 		if (res)
1475 			goto fault;
1476 		break;
1477 
1478 	case MIPS16e_sw_op:
1479 	case MIPS16e_swsp_op:
1480 	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
1481 		if (!access_ok(VERIFY_WRITE, addr, 4))
1482 			goto sigbus;
1483 
1484 		MIPS16e_compute_return_epc(regs, &oldinst);
1485 		value = regs->regs[reg];
1486 		StoreW(addr, value, res);
1487 		if (res)
1488 			goto fault;
1489 		break;
1490 
1491 	case MIPS16e_sd_op:
1492 writeDW:
1493 #ifdef CONFIG_64BIT
1494 		/*
1495 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1496 		 * if we're on a 32-bit processor and an i-cache incoherency
1497 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1498 		 * would blow up, so for now we don't handle unaligned 64-bit
1499 		 * instructions on 32-bit kernels.
1500 		 */
1501 		if (!access_ok(VERIFY_WRITE, addr, 8))
1502 			goto sigbus;
1503 
1504 		MIPS16e_compute_return_epc(regs, &oldinst);
1505 		value = regs->regs[reg];
1506 		StoreDW(addr, value, res);
1507 		if (res)
1508 			goto fault;
1509 		break;
1510 #endif /* CONFIG_64BIT */
1511 
1512 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1513 		goto sigill;
1514 
1515 	default:
1516 		/*
1517 		 * Pheeee...  We encountered an as yet unknown instruction or
1518 		 * cache coherence problem.  Die sucker, die ...
1519 		 */
1520 		goto sigill;
1521 	}
1522 
1523 #ifdef CONFIG_DEBUG_FS
1524 	unaligned_instructions++;
1525 #endif
1526 
1527 	return;
1528 
1529 fault:
1530 	/* roll back jump/branch */
1531 	regs->cp0_epc = origpc;
1532 	regs->regs[31] = orig31;
1533 	/* Did we have an exception handler installed? */
1534 	if (fixup_exception(regs))
1535 		return;
1536 
1537 	die_if_kernel("Unhandled kernel unaligned access", regs);
1538 	force_sig(SIGSEGV, current);
1539 
1540 	return;
1541 
1542 sigbus:
1543 	die_if_kernel("Unhandled kernel unaligned access", regs);
1544 	force_sig(SIGBUS, current);
1545 
1546 	return;
1547 
1548 sigill:
1549 	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
1550 		      regs);
1551 	force_sig(SIGILL, current);
1552 }
1553 
1554 asmlinkage void do_ade(struct pt_regs *regs)
1555 {
1556 	enum ctx_state prev_state;
1557 	unsigned int __user *pc;
1558 	mm_segment_t seg;
1559 
1560 	prev_state = exception_enter();
1561 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1562 			1, regs, regs->cp0_badvaddr);
1563 	/*
1564 	 * Did we catch a fault trying to load an instruction?
1565 	 */
1566 	if (regs->cp0_badvaddr == regs->cp0_epc)
1567 		goto sigbus;
1568 
1569 	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1570 		goto sigbus;
1571 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1572 		goto sigbus;
1573 
1574 	/*
1575 	 * Do branch emulation only if we didn't forward the exception.
1576 	 * This is all rather ugly ...
1577 	 */
1578 
1579 	/*
1580 	 * Are we running in microMIPS mode?
1581 	 */
1582 	if (get_isa16_mode(regs->cp0_epc)) {
1583 		/*
1584 		 * Did we catch a fault trying to load an instruction in
1585 		 * 16-bit mode?
1586 		 */
1587 		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1588 			goto sigbus;
1589 		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1590 			show_registers(regs);
1591 
1592 		if (cpu_has_mmips) {
1593 			seg = get_fs();
1594 			if (!user_mode(regs))
1595 				set_fs(KERNEL_DS);
1596 			emulate_load_store_microMIPS(regs,
1597 				(void __user *)regs->cp0_badvaddr);
1598 			set_fs(seg);
1599 
1600 			return;
1601 		}
1602 
1603 		if (cpu_has_mips16) {
1604 			seg = get_fs();
1605 			if (!user_mode(regs))
1606 				set_fs(KERNEL_DS);
1607 			emulate_load_store_MIPS16e(regs,
1608 				(void __user *)regs->cp0_badvaddr);
1609 			set_fs(seg);
1610 
1611 			return;
1612 		}
1613 
1614 		goto sigbus;
1615 	}
1616 
1617 	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1618 		show_registers(regs);
1619 	pc = (unsigned int __user *)exception_epc(regs);
1620 
1621 	seg = get_fs();
1622 	if (!user_mode(regs))
1623 		set_fs(KERNEL_DS);
1624 	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1625 	set_fs(seg);
1626 
1627 	return;
1628 
1629 sigbus:
1630 	die_if_kernel("Kernel unaligned instruction access", regs);
1631 	force_sig(SIGBUS, current);
1632 
1633 	/*
1634 	 * XXX On return from the signal handler we should advance the epc
1635 	 */
1636 	exception_exit(prev_state);
1637 }
1638 
1639 #ifdef CONFIG_DEBUG_FS
1640 extern struct dentry *mips_debugfs_dir;
1641 static int __init debugfs_unaligned(void)
1642 {
1643 	struct dentry *d;
1644 
1645 	if (!mips_debugfs_dir)
1646 		return -ENODEV;
1647 	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
1648 			       mips_debugfs_dir, &unaligned_instructions);
1649 	if (!d)
1650 		return -ENOMEM;
1651 	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1652 			       mips_debugfs_dir, &unaligned_action);
1653 	if (!d)
1654 		return -ENOMEM;
1655 	return 0;
1656 }
1657 __initcall(debugfs_unaligned);
1658 #endif
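/*
 * With CONFIG_DEBUG_FS the two knobs above appear under the MIPS debugfs
 * directory.  Illustrative usage, assuming debugfs is mounted at
 * /sys/kernel/debug and mips_debugfs_dir is the usual "mips" directory:
 *
 *	cat /sys/kernel/debug/mips/unaligned_instructions
 *	echo 2 > /sys/kernel/debug/mips/unaligned_action
 *
 * where unaligned_action is 0 (quietly fix up), 1 (send a signal instead
 * of fixing up) or 2 (fix up and dump registers), matching the
 * UNALIGNED_ACTION_* enum near the top of this file.
 */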
1659