1 /*
2  * Handle unaligned accesses by emulation.
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  *
11  * This file contains the exception handler for the address error exception,
12  * with the special capability to execute faulting instructions in software.  The
13  * handler does not try to handle the case when the program counter points
14  * to an address not aligned to a word boundary.
15  *
16  * Accessing data at unaligned addresses is bad practice even on Intel, where
17  * only performance is affected.  Much worse, such code is not portable.
18  * Because several programs die on MIPS due to alignment problems, I decided
19  * to implement this handler anyway, though I originally didn't intend to do
20  * this at all for user code.
21  *
22  * For now I enable fixing of address errors by default to make life easier.
23  * However, I intend to disable this at some point in the future, once the
24  * alignment problems in user programs have been fixed.  For programmers this
25  * is the right way to go.
26  *
27  * Fixing address errors is a per-process option.  The option is inherited
28  * across fork(2) and execve(2) calls.  If you really want to use this
29  * option in your user programs - though I strongly discourage relying on
30  * the software emulation - use the following code in your userland stuff:
31  *
32  * #include <sys/sysmips.h>
33  *
34  * ...
35  * sysmips(MIPS_FIXADE, x);
36  * ...
37  *
38  * The argument x is 0 to disable software emulation; any other value enables it.
39  *
40  * Below is a little program to play around with this feature.
41  *
42  * #include <stdio.h>
43  * #include <sys/sysmips.h>
44  *
45  * struct foo {
46  *	   unsigned char bar[8];
47  * };
48  *
49  * int main(int argc, char *argv[])
50  * {
51  *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
52  *	   unsigned int *p = (unsigned int *) (x.bar + 3);
53  *	   int i;
54  *
55  *	   if (argc > 1)
56  *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
57  *
58  *	   printf("*p = %08x\n", *p);
59  *
60  *	   *p = 0xdeadface;
61  *
62  *	   for (i = 0; i <= 7; i++)
63  *		   printf("%02x ", x.bar[i]);
64  *	   printf("\n");
65  * }
66  *
67  * Coprocessor loads are not supported; I think this case is unimportant
68  * in practice.
69  *
70  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
71  *	 exception for the R6000.
72  *	 A store crossing a page boundary might be executed only partially.
73  *	 Undo the partial store in this case.
74  */
75 #include <linux/mm.h>
76 #include <linux/signal.h>
77 #include <linux/smp.h>
78 #include <linux/sched.h>
79 #include <linux/debugfs.h>
80 #include <linux/perf_event.h>
81 
82 #include <asm/asm.h>
83 #include <asm/branch.h>
84 #include <asm/byteorder.h>
85 #include <asm/cop2.h>
86 #include <asm/fpu.h>
87 #include <asm/fpu_emulator.h>
88 #include <asm/inst.h>
89 #include <asm/uaccess.h>
92 
93 #define STR(x)	__STR(x)
94 #define __STR(x)  #x
95 
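/*
 * STR(PTR) stringizes the PTR macro from <asm/asm.h> (the pointer-sized
 * data directive, e.g. ".word" on 32-bit and ".dword" on 64-bit kernels),
 * so the __ex_table entries emitted by the access macros below use the
 * correct pointer size.
 */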
96 enum {
97 	UNALIGNED_ACTION_QUIET,
98 	UNALIGNED_ACTION_SIGNAL,
99 	UNALIGNED_ACTION_SHOW,
100 };
101 #ifdef CONFIG_DEBUG_FS
102 static u32 unaligned_instructions;
103 static u32 unaligned_action;
104 #else
105 #define unaligned_action UNALIGNED_ACTION_QUIET
106 #endif
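/*
 * With CONFIG_DEBUG_FS the counter and the action control above are
 * exported through debugfs (see debugfs_unaligned() at the bottom of
 * this file).  For example, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/mips/unaligned_instructions
 *   echo 2 > /sys/kernel/debug/mips/unaligned_action	# UNALIGNED_ACTION_SHOW
 */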
107 extern void show_registers(struct pt_regs *regs);
108 
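/*
 * Every LoadX/StoreX macro below follows the same pattern: labels 1: and
 * 2: mark the two partial accesses, label 3: is the common exit and
 * label 4: in the .fixup section loads -EFAULT into the result operand
 * and jumps back to 3:.  The __ex_table entries pair each potentially
 * faulting access (1b, 2b) with its fixup (4b) so the fault handler can
 * recover via fixup_exception().
 */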
109 #ifdef __BIG_ENDIAN
110 #define     LoadHW(addr, value, res)  \
111 		__asm__ __volatile__ (".set\tnoat\n"        \
112 			"1:\tlb\t%0, 0(%2)\n"               \
113 			"2:\tlbu\t$1, 1(%2)\n\t"            \
114 			"sll\t%0, 0x8\n\t"                  \
115 			"or\t%0, $1\n\t"                    \
116 			"li\t%1, 0\n"                       \
117 			"3:\t.set\tat\n\t"                  \
118 			".insn\n\t"                         \
119 			".section\t.fixup,\"ax\"\n\t"       \
120 			"4:\tli\t%1, %3\n\t"                \
121 			"j\t3b\n\t"                         \
122 			".previous\n\t"                     \
123 			".section\t__ex_table,\"a\"\n\t"    \
124 			STR(PTR)"\t1b, 4b\n\t"              \
125 			STR(PTR)"\t2b, 4b\n\t"              \
126 			".previous"                         \
127 			: "=&r" (value), "=r" (res)         \
128 			: "r" (addr), "i" (-EFAULT));
129 
130 #define     LoadW(addr, value, res)   \
131 		__asm__ __volatile__ (                      \
132 			"1:\tlwl\t%0, (%2)\n"               \
133 			"2:\tlwr\t%0, 3(%2)\n\t"            \
134 			"li\t%1, 0\n"                       \
135 			"3:\n\t"                            \
136 			".insn\n\t"                         \
137 			".section\t.fixup,\"ax\"\n\t"       \
138 			"4:\tli\t%1, %3\n\t"                \
139 			"j\t3b\n\t"                         \
140 			".previous\n\t"                     \
141 			".section\t__ex_table,\"a\"\n\t"    \
142 			STR(PTR)"\t1b, 4b\n\t"              \
143 			STR(PTR)"\t2b, 4b\n\t"              \
144 			".previous"                         \
145 			: "=&r" (value), "=r" (res)         \
146 			: "r" (addr), "i" (-EFAULT));
147 
148 #define     LoadHWU(addr, value, res) \
149 		__asm__ __volatile__ (                      \
150 			".set\tnoat\n"                      \
151 			"1:\tlbu\t%0, 0(%2)\n"              \
152 			"2:\tlbu\t$1, 1(%2)\n\t"            \
153 			"sll\t%0, 0x8\n\t"                  \
154 			"or\t%0, $1\n\t"                    \
155 			"li\t%1, 0\n"                       \
156 			"3:\n\t"                            \
157 			".insn\n\t"                         \
158 			".set\tat\n\t"                      \
159 			".section\t.fixup,\"ax\"\n\t"       \
160 			"4:\tli\t%1, %3\n\t"                \
161 			"j\t3b\n\t"                         \
162 			".previous\n\t"                     \
163 			".section\t__ex_table,\"a\"\n\t"    \
164 			STR(PTR)"\t1b, 4b\n\t"              \
165 			STR(PTR)"\t2b, 4b\n\t"              \
166 			".previous"                         \
167 			: "=&r" (value), "=r" (res)         \
168 			: "r" (addr), "i" (-EFAULT));
169 
170 #define     LoadWU(addr, value, res)  \
171 		__asm__ __volatile__ (                      \
172 			"1:\tlwl\t%0, (%2)\n"               \
173 			"2:\tlwr\t%0, 3(%2)\n\t"            \
174 			"dsll\t%0, %0, 32\n\t"              \
175 			"dsrl\t%0, %0, 32\n\t"              \
176 			"li\t%1, 0\n"                       \
177 			"3:\n\t"                            \
178 			".insn\n\t"                         \
179 			"\t.section\t.fixup,\"ax\"\n\t"     \
180 			"4:\tli\t%1, %3\n\t"                \
181 			"j\t3b\n\t"                         \
182 			".previous\n\t"                     \
183 			".section\t__ex_table,\"a\"\n\t"    \
184 			STR(PTR)"\t1b, 4b\n\t"              \
185 			STR(PTR)"\t2b, 4b\n\t"              \
186 			".previous"                         \
187 			: "=&r" (value), "=r" (res)         \
188 			: "r" (addr), "i" (-EFAULT));
189 
190 #define     LoadDW(addr, value, res)  \
191 		__asm__ __volatile__ (                      \
192 			"1:\tldl\t%0, (%2)\n"               \
193 			"2:\tldr\t%0, 7(%2)\n\t"            \
194 			"li\t%1, 0\n"                       \
195 			"3:\n\t"                            \
196 			".insn\n\t"                         \
197 			"\t.section\t.fixup,\"ax\"\n\t"     \
198 			"4:\tli\t%1, %3\n\t"                \
199 			"j\t3b\n\t"                         \
200 			".previous\n\t"                     \
201 			".section\t__ex_table,\"a\"\n\t"    \
202 			STR(PTR)"\t1b, 4b\n\t"              \
203 			STR(PTR)"\t2b, 4b\n\t"              \
204 			".previous"                         \
205 			: "=&r" (value), "=r" (res)         \
206 			: "r" (addr), "i" (-EFAULT));
207 
208 #define     StoreHW(addr, value, res) \
209 		__asm__ __volatile__ (                      \
210 			".set\tnoat\n"                      \
211 			"1:\tsb\t%1, 1(%2)\n\t"             \
212 			"srl\t$1, %1, 0x8\n"                \
213 			"2:\tsb\t$1, 0(%2)\n\t"             \
214 			".set\tat\n\t"                      \
215 			"li\t%0, 0\n"                       \
216 			"3:\n\t"                            \
217 			".insn\n\t"                         \
218 			".section\t.fixup,\"ax\"\n\t"       \
219 			"4:\tli\t%0, %3\n\t"                \
220 			"j\t3b\n\t"                         \
221 			".previous\n\t"                     \
222 			".section\t__ex_table,\"a\"\n\t"    \
223 			STR(PTR)"\t1b, 4b\n\t"              \
224 			STR(PTR)"\t2b, 4b\n\t"              \
225 			".previous"                         \
226 			: "=r" (res)                        \
227 			: "r" (value), "r" (addr), "i" (-EFAULT));
228 
229 #define     StoreW(addr, value, res)  \
230 		__asm__ __volatile__ (                      \
231 			"1:\tswl\t%1,(%2)\n"                \
232 			"2:\tswr\t%1, 3(%2)\n\t"            \
233 			"li\t%0, 0\n"                       \
234 			"3:\n\t"                            \
235 			".insn\n\t"                         \
236 			".section\t.fixup,\"ax\"\n\t"       \
237 			"4:\tli\t%0, %3\n\t"                \
238 			"j\t3b\n\t"                         \
239 			".previous\n\t"                     \
240 			".section\t__ex_table,\"a\"\n\t"    \
241 			STR(PTR)"\t1b, 4b\n\t"              \
242 			STR(PTR)"\t2b, 4b\n\t"              \
243 			".previous"                         \
244 		: "=r" (res)                                \
245 		: "r" (value), "r" (addr), "i" (-EFAULT));
246 
247 #define     StoreDW(addr, value, res) \
248 		__asm__ __volatile__ (                      \
249 			"1:\tsdl\t%1,(%2)\n"                \
250 			"2:\tsdr\t%1, 7(%2)\n\t"            \
251 			"li\t%0, 0\n"                       \
252 			"3:\n\t"                            \
253 			".insn\n\t"                         \
254 			".section\t.fixup,\"ax\"\n\t"       \
255 			"4:\tli\t%0, %3\n\t"                \
256 			"j\t3b\n\t"                         \
257 			".previous\n\t"                     \
258 			".section\t__ex_table,\"a\"\n\t"    \
259 			STR(PTR)"\t1b, 4b\n\t"              \
260 			STR(PTR)"\t2b, 4b\n\t"              \
261 			".previous"                         \
262 		: "=r" (res)                                \
263 		: "r" (value), "r" (addr), "i" (-EFAULT));
264 #endif
265 
266 #ifdef __LITTLE_ENDIAN
267 #define     LoadHW(addr, value, res)  \
268 		__asm__ __volatile__ (".set\tnoat\n"        \
269 			"1:\tlb\t%0, 1(%2)\n"               \
270 			"2:\tlbu\t$1, 0(%2)\n\t"            \
271 			"sll\t%0, 0x8\n\t"                  \
272 			"or\t%0, $1\n\t"                    \
273 			"li\t%1, 0\n"                       \
274 			"3:\t.set\tat\n\t"                  \
275 			".insn\n\t"                         \
276 			".section\t.fixup,\"ax\"\n\t"       \
277 			"4:\tli\t%1, %3\n\t"                \
278 			"j\t3b\n\t"                         \
279 			".previous\n\t"                     \
280 			".section\t__ex_table,\"a\"\n\t"    \
281 			STR(PTR)"\t1b, 4b\n\t"              \
282 			STR(PTR)"\t2b, 4b\n\t"              \
283 			".previous"                         \
284 			: "=&r" (value), "=r" (res)         \
285 			: "r" (addr), "i" (-EFAULT));
286 
287 #define     LoadW(addr, value, res)   \
288 		__asm__ __volatile__ (                      \
289 			"1:\tlwl\t%0, 3(%2)\n"              \
290 			"2:\tlwr\t%0, (%2)\n\t"             \
291 			"li\t%1, 0\n"                       \
292 			"3:\n\t"                            \
293 			".insn\n\t"                         \
294 			".section\t.fixup,\"ax\"\n\t"       \
295 			"4:\tli\t%1, %3\n\t"                \
296 			"j\t3b\n\t"                         \
297 			".previous\n\t"                     \
298 			".section\t__ex_table,\"a\"\n\t"    \
299 			STR(PTR)"\t1b, 4b\n\t"              \
300 			STR(PTR)"\t2b, 4b\n\t"              \
301 			".previous"                         \
302 			: "=&r" (value), "=r" (res)         \
303 			: "r" (addr), "i" (-EFAULT));
304 
305 #define     LoadHWU(addr, value, res) \
306 		__asm__ __volatile__ (                      \
307 			".set\tnoat\n"                      \
308 			"1:\tlbu\t%0, 1(%2)\n"              \
309 			"2:\tlbu\t$1, 0(%2)\n\t"            \
310 			"sll\t%0, 0x8\n\t"                  \
311 			"or\t%0, $1\n\t"                    \
312 			"li\t%1, 0\n"                       \
313 			"3:\n\t"                            \
314 			".insn\n\t"                         \
315 			".set\tat\n\t"                      \
316 			".section\t.fixup,\"ax\"\n\t"       \
317 			"4:\tli\t%1, %3\n\t"                \
318 			"j\t3b\n\t"                         \
319 			".previous\n\t"                     \
320 			".section\t__ex_table,\"a\"\n\t"    \
321 			STR(PTR)"\t1b, 4b\n\t"              \
322 			STR(PTR)"\t2b, 4b\n\t"              \
323 			".previous"                         \
324 			: "=&r" (value), "=r" (res)         \
325 			: "r" (addr), "i" (-EFAULT));
326 
327 #define     LoadWU(addr, value, res)  \
328 		__asm__ __volatile__ (                      \
329 			"1:\tlwl\t%0, 3(%2)\n"              \
330 			"2:\tlwr\t%0, (%2)\n\t"             \
331 			"dsll\t%0, %0, 32\n\t"              \
332 			"dsrl\t%0, %0, 32\n\t"              \
333 			"li\t%1, 0\n"                       \
334 			"3:\n\t"                            \
335 			".insn\n\t"                         \
336 			"\t.section\t.fixup,\"ax\"\n\t"     \
337 			"4:\tli\t%1, %3\n\t"                \
338 			"j\t3b\n\t"                         \
339 			".previous\n\t"                     \
340 			".section\t__ex_table,\"a\"\n\t"    \
341 			STR(PTR)"\t1b, 4b\n\t"              \
342 			STR(PTR)"\t2b, 4b\n\t"              \
343 			".previous"                         \
344 			: "=&r" (value), "=r" (res)         \
345 			: "r" (addr), "i" (-EFAULT));
346 
347 #define     LoadDW(addr, value, res)  \
348 		__asm__ __volatile__ (                      \
349 			"1:\tldl\t%0, 7(%2)\n"              \
350 			"2:\tldr\t%0, (%2)\n\t"             \
351 			"li\t%1, 0\n"                       \
352 			"3:\n\t"                            \
353 			".insn\n\t"                         \
354 			"\t.section\t.fixup,\"ax\"\n\t"     \
355 			"4:\tli\t%1, %3\n\t"                \
356 			"j\t3b\n\t"                         \
357 			".previous\n\t"                     \
358 			".section\t__ex_table,\"a\"\n\t"    \
359 			STR(PTR)"\t1b, 4b\n\t"              \
360 			STR(PTR)"\t2b, 4b\n\t"              \
361 			".previous"                         \
362 			: "=&r" (value), "=r" (res)         \
363 			: "r" (addr), "i" (-EFAULT));
364 
365 #define     StoreHW(addr, value, res) \
366 		__asm__ __volatile__ (                      \
367 			".set\tnoat\n"                      \
368 			"1:\tsb\t%1, 0(%2)\n\t"             \
369 			"srl\t$1,%1, 0x8\n"                 \
370 			"2:\tsb\t$1, 1(%2)\n\t"             \
371 			".set\tat\n\t"                      \
372 			"li\t%0, 0\n"                       \
373 			"3:\n\t"                            \
374 			".insn\n\t"                         \
375 			".section\t.fixup,\"ax\"\n\t"       \
376 			"4:\tli\t%0, %3\n\t"                \
377 			"j\t3b\n\t"                         \
378 			".previous\n\t"                     \
379 			".section\t__ex_table,\"a\"\n\t"    \
380 			STR(PTR)"\t1b, 4b\n\t"              \
381 			STR(PTR)"\t2b, 4b\n\t"              \
382 			".previous"                         \
383 			: "=r" (res)                        \
384 			: "r" (value), "r" (addr), "i" (-EFAULT));
385 
386 #define     StoreW(addr, value, res)  \
387 		__asm__ __volatile__ (                      \
388 			"1:\tswl\t%1, 3(%2)\n"              \
389 			"2:\tswr\t%1, (%2)\n\t"             \
390 			"li\t%0, 0\n"                       \
391 			"3:\n\t"                            \
392 			".insn\n\t"                         \
393 			".section\t.fixup,\"ax\"\n\t"       \
394 			"4:\tli\t%0, %3\n\t"                \
395 			"j\t3b\n\t"                         \
396 			".previous\n\t"                     \
397 			".section\t__ex_table,\"a\"\n\t"    \
398 			STR(PTR)"\t1b, 4b\n\t"              \
399 			STR(PTR)"\t2b, 4b\n\t"              \
400 			".previous"                         \
401 		: "=r" (res)                                \
402 		: "r" (value), "r" (addr), "i" (-EFAULT));
403 
404 #define     StoreDW(addr, value, res) \
405 		__asm__ __volatile__ (                      \
406 			"1:\tsdl\t%1, 7(%2)\n"              \
407 			"2:\tsdr\t%1, (%2)\n\t"             \
408 			"li\t%0, 0\n"                       \
409 			"3:\n\t"                            \
410 			".insn\n\t"                         \
411 			".section\t.fixup,\"ax\"\n\t"       \
412 			"4:\tli\t%0, %3\n\t"                \
413 			"j\t3b\n\t"                         \
414 			".previous\n\t"                     \
415 			".section\t__ex_table,\"a\"\n\t"    \
416 			STR(PTR)"\t1b, 4b\n\t"              \
417 			STR(PTR)"\t2b, 4b\n\t"              \
418 			".previous"                         \
419 		: "=r" (res)                                \
420 		: "r" (value), "r" (addr), "i" (-EFAULT));
421 #endif
422 
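/*
 * Decode the classic 32-bit MIPS instruction at @pc, perform the
 * unaligned access to @addr on behalf of the faulting code, write back
 * the destination register for loads and advance the EPC past the
 * instruction (branch delay slots are handled via compute_return_epc()).
 */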
423 static void emulate_load_store_insn(struct pt_regs *regs,
424 	void __user *addr, unsigned int __user *pc)
425 {
426 	union mips_instruction insn;
427 	unsigned long value;
428 	unsigned int res;
429 	unsigned long origpc;
430 	unsigned long orig31;
431 	void __user *fault_addr = NULL;
432 
433 	origpc = (unsigned long)pc;
434 	orig31 = regs->regs[31];
435 
436 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
437 
438 	/*
439 	 * This load never faults.
440 	 */
441 	__get_user(insn.word, pc);
442 
443 	switch (insn.i_format.opcode) {
444 		/*
445 		 * These are instructions that a compiler doesn't generate.  We
446 		 * can therefore assume that the code is MIPS-aware and
447 		 * really buggy.  Emulating these instructions would break the
448 		 * semantics anyway.
449 		 */
450 	case ll_op:
451 	case lld_op:
452 	case sc_op:
453 	case scd_op:
454 
455 		/*
456 		 * For these instructions the only way to create an address
457 		 * error is an attempted access to kernel/supervisor address
458 		 * space.
459 		 */
460 	case ldl_op:
461 	case ldr_op:
462 	case lwl_op:
463 	case lwr_op:
464 	case sdl_op:
465 	case sdr_op:
466 	case swl_op:
467 	case swr_op:
468 	case lb_op:
469 	case lbu_op:
470 	case sb_op:
471 		goto sigbus;
472 
473 		/*
474 		 * The remaining opcodes are the ones that are really of
475 		 * interest.
476 		 */
477 	case lh_op:
478 		if (!access_ok(VERIFY_READ, addr, 2))
479 			goto sigbus;
480 
481 		LoadHW(addr, value, res);
482 		if (res)
483 			goto fault;
484 		compute_return_epc(regs);
485 		regs->regs[insn.i_format.rt] = value;
486 		break;
487 
488 	case lw_op:
489 		if (!access_ok(VERIFY_READ, addr, 4))
490 			goto sigbus;
491 
492 		LoadW(addr, value, res);
493 		if (res)
494 			goto fault;
495 		compute_return_epc(regs);
496 		regs->regs[insn.i_format.rt] = value;
497 		break;
498 
499 	case lhu_op:
500 		if (!access_ok(VERIFY_READ, addr, 2))
501 			goto sigbus;
502 
503 		LoadHWU(addr, value, res);
504 		if (res)
505 			goto fault;
506 		compute_return_epc(regs);
507 		regs->regs[insn.i_format.rt] = value;
508 		break;
509 
510 	case lwu_op:
511 #ifdef CONFIG_64BIT
512 		/*
513 		 * A 32-bit kernel might be running on a 64-bit processor.  But
514 		 * if we're on a 32-bit processor and an i-cache incoherency
515 		 * or race makes us see a 64-bit instruction here the sdl/sdr
516 		 * would blow up, so for now we don't handle unaligned 64-bit
517 		 * instructions on 32-bit kernels.
518 		 */
519 		if (!access_ok(VERIFY_READ, addr, 4))
520 			goto sigbus;
521 
522 		LoadWU(addr, value, res);
523 		if (res)
524 			goto fault;
525 		compute_return_epc(regs);
526 		regs->regs[insn.i_format.rt] = value;
527 		break;
528 #endif /* CONFIG_64BIT */
529 
530 		/* Cannot handle 64-bit instructions in 32-bit kernel */
531 		goto sigill;
532 
533 	case ld_op:
534 #ifdef CONFIG_64BIT
535 		/*
536 		 * A 32-bit kernel might be running on a 64-bit processor.  But
537 		 * if we're on a 32-bit processor and an i-cache incoherency
538 		 * or race makes us see a 64-bit instruction here the sdl/sdr
539 		 * would blow up, so for now we don't handle unaligned 64-bit
540 		 * instructions on 32-bit kernels.
541 		 */
542 		if (!access_ok(VERIFY_READ, addr, 8))
543 			goto sigbus;
544 
545 		LoadDW(addr, value, res);
546 		if (res)
547 			goto fault;
548 		compute_return_epc(regs);
549 		regs->regs[insn.i_format.rt] = value;
550 		break;
551 #endif /* CONFIG_64BIT */
552 
553 		/* Cannot handle 64-bit instructions in 32-bit kernel */
554 		goto sigill;
555 
556 	case sh_op:
557 		if (!access_ok(VERIFY_WRITE, addr, 2))
558 			goto sigbus;
559 
560 		compute_return_epc(regs);
561 		value = regs->regs[insn.i_format.rt];
562 		StoreHW(addr, value, res);
563 		if (res)
564 			goto fault;
565 		break;
566 
567 	case sw_op:
568 		if (!access_ok(VERIFY_WRITE, addr, 4))
569 			goto sigbus;
570 
571 		compute_return_epc(regs);
572 		value = regs->regs[insn.i_format.rt];
573 		StoreW(addr, value, res);
574 		if (res)
575 			goto fault;
576 		break;
577 
578 	case sd_op:
579 #ifdef CONFIG_64BIT
580 		/*
581 		 * A 32-bit kernel might be running on a 64-bit processor.  But
582 		 * if we're on a 32-bit processor and an i-cache incoherency
583 		 * or race makes us see a 64-bit instruction here the sdl/sdr
584 		 * would blow up, so for now we don't handle unaligned 64-bit
585 		 * instructions on 32-bit kernels.
586 		 */
587 		if (!access_ok(VERIFY_WRITE, addr, 8))
588 			goto sigbus;
589 
590 		compute_return_epc(regs);
591 		value = regs->regs[insn.i_format.rt];
592 		StoreDW(addr, value, res);
593 		if (res)
594 			goto fault;
595 		break;
596 #endif /* CONFIG_64BIT */
597 
598 		/* Cannot handle 64-bit instructions in 32-bit kernel */
599 		goto sigill;
600 
601 	case lwc1_op:
602 	case ldc1_op:
603 	case swc1_op:
604 	case sdc1_op:
605 		die_if_kernel("Unaligned FP access in kernel code", regs);
606 		BUG_ON(!used_math());
607 		BUG_ON(!is_fpu_owner());
608 
609 		lose_fpu(1);	/* Save FPU state for the emulator. */
610 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
611 					       &fault_addr);
612 		own_fpu(1);	/* Restore FPU state. */
613 
614 		/* Signal if something went wrong. */
615 		process_fpemu_return(res, fault_addr);
616 
617 		if (res == 0)
618 			break;
619 		return;
620 
621 	/*
622 	 * COP2 is available to the implementor for application-specific use.
623 	 * It's up to applications to register a notifier chain and do
624 	 * whatever they have to do, including possibly sending signals.
625 	 */
626 	case lwc2_op:
627 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
628 		break;
629 
630 	case ldc2_op:
631 		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
632 		break;
633 
634 	case swc2_op:
635 		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
636 		break;
637 
638 	case sdc2_op:
639 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
640 		break;
641 
642 	default:
643 		/*
644 		 * Pheeee...  We encountered an as yet unknown instruction or
645 		 * cache coherence problem.  Die sucker, die ...
646 		 */
647 		goto sigill;
648 	}
649 
650 #ifdef CONFIG_DEBUG_FS
651 	unaligned_instructions++;
652 #endif
653 
654 	return;
655 
656 fault:
657 	/* roll back jump/branch */
658 	regs->cp0_epc = origpc;
659 	regs->regs[31] = orig31;
660 	/* Did we have an exception handler installed? */
661 	if (fixup_exception(regs))
662 		return;
663 
664 	die_if_kernel("Unhandled kernel unaligned access", regs);
665 	force_sig(SIGSEGV, current);
666 
667 	return;
668 
669 sigbus:
670 	die_if_kernel("Unhandled kernel unaligned access", regs);
671 	force_sig(SIGBUS, current);
672 
673 	return;
674 
675 sigill:
676 	die_if_kernel
677 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
678 	force_sig(SIGILL, current);
679 }
680 
681 /* Recode table from 16-bit register notation to 32-bit GPR. */
682 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
683 
684 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
685 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
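/*
 * reg16to32st differs only in slot 0: it is used by the 16-bit microMIPS
 * store encodings (mm_sh16_op/mm_sw16_op below), whose register field
 * can also select $0; reg16to32 is used everywhere else.
 */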
686 
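/*
 * Handle an unaligned access taken in microMIPS mode.  The faulting
 * instruction may be 16 or 32 bits wide, and when it sits in a branch
 * delay slot the instruction that actually needs emulating is the
 * following one, so both are fetched and mm_isBranchInstr() decides
 * which of the two to decode.
 */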
687 void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
688 {
689 	unsigned long value;
690 	unsigned int res;
691 	int i;
692 	unsigned int reg = 0, rvar;
693 	unsigned long orig31;
694 	u16 __user *pc16;
695 	u16 halfword;
696 	unsigned int word;
697 	unsigned long origpc, contpc;
698 	union mips_instruction insn;
699 	struct mm_decoded_insn mminsn;
700 	void __user *fault_addr = NULL;
701 
702 	origpc = regs->cp0_epc;
703 	orig31 = regs->regs[31];
704 
705 	mminsn.micro_mips_mode = 1;
706 
707 	/*
708 	 * This load never faults.
709 	 */
710 	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
711 	__get_user(halfword, pc16);
712 	pc16++;
713 	contpc = regs->cp0_epc + 2;
714 	word = ((unsigned int)halfword << 16);
715 	mminsn.pc_inc = 2;
716 
717 	if (!mm_insn_16bit(halfword)) {
718 		__get_user(halfword, pc16);
719 		pc16++;
720 		contpc = regs->cp0_epc + 4;
721 		mminsn.pc_inc = 4;
722 		word |= halfword;
723 	}
724 	mminsn.insn = word;
725 
726 	if (get_user(halfword, pc16))
727 		goto fault;
728 	mminsn.next_pc_inc = 2;
729 	word = ((unsigned int)halfword << 16);
730 
731 	if (!mm_insn_16bit(halfword)) {
732 		pc16++;
733 		if (get_user(halfword, pc16))
734 			goto fault;
735 		mminsn.next_pc_inc = 4;
736 		word |= halfword;
737 	}
738 	mminsn.next_insn = word;
739 
740 	insn = (union mips_instruction)(mminsn.insn);
741 	if (mm_isBranchInstr(regs, mminsn, &contpc))
742 		insn = (union mips_instruction)(mminsn.next_insn);
743 
744 	/*  Parse instruction to find what to do */
745 
746 	switch (insn.mm_i_format.opcode) {
747 
748 	case mm_pool32a_op:
749 		switch (insn.mm_x_format.func) {
750 		case mm_lwxs_op:
751 			reg = insn.mm_x_format.rd;
752 			goto loadW;
753 		}
754 
755 		goto sigbus;
756 
757 	case mm_pool32b_op:
758 		switch (insn.mm_m_format.func) {
759 		case mm_lwp_func:
760 			reg = insn.mm_m_format.rd;
761 			if (reg == 31)
762 				goto sigbus;
763 
764 			if (!access_ok(VERIFY_READ, addr, 8))
765 				goto sigbus;
766 
767 			LoadW(addr, value, res);
768 			if (res)
769 				goto fault;
770 			regs->regs[reg] = value;
771 			addr += 4;
772 			LoadW(addr, value, res);
773 			if (res)
774 				goto fault;
775 			regs->regs[reg + 1] = value;
776 			goto success;
777 
778 		case mm_swp_func:
779 			reg = insn.mm_m_format.rd;
780 			if (reg == 31)
781 				goto sigbus;
782 
783 			if (!access_ok(VERIFY_WRITE, addr, 8))
784 				goto sigbus;
785 
786 			value = regs->regs[reg];
787 			StoreW(addr, value, res);
788 			if (res)
789 				goto fault;
790 			addr += 4;
791 			value = regs->regs[reg + 1];
792 			StoreW(addr, value, res);
793 			if (res)
794 				goto fault;
795 			goto success;
796 
797 		case mm_ldp_func:
798 #ifdef CONFIG_64BIT
799 			reg = insn.mm_m_format.rd;
800 			if (reg == 31)
801 				goto sigbus;
802 
803 			if (!access_ok(VERIFY_READ, addr, 16))
804 				goto sigbus;
805 
806 			LoadDW(addr, value, res);
807 			if (res)
808 				goto fault;
809 			regs->regs[reg] = value;
810 			addr += 8;
811 			LoadDW(addr, value, res);
812 			if (res)
813 				goto fault;
814 			regs->regs[reg + 1] = value;
815 			goto success;
816 #endif /* CONFIG_64BIT */
817 
818 			goto sigill;
819 
820 		case mm_sdp_func:
821 #ifdef CONFIG_64BIT
822 			reg = insn.mm_m_format.rd;
823 			if (reg == 31)
824 				goto sigbus;
825 
826 			if (!access_ok(VERIFY_WRITE, addr, 16))
827 				goto sigbus;
828 
829 			value = regs->regs[reg];
830 			StoreDW(addr, value, res);
831 			if (res)
832 				goto fault;
833 			addr += 8;
834 			value = regs->regs[reg + 1];
835 			StoreDW(addr, value, res);
836 			if (res)
837 				goto fault;
838 			goto success;
839 #endif /* CONFIG_64BIT */
840 
841 			goto sigill;
842 
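		/*
		 * LWM32/SWM32: the rd field encodes a register list.  The low
		 * four bits give the number of registers transferred starting
		 * at $16 (the value 9 means $16-$23 plus $30), and bit 4 adds
		 * $31 (ra) to the list.
		 */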
843 		case mm_lwm32_func:
844 			reg = insn.mm_m_format.rd;
845 			rvar = reg & 0xf;
846 			if ((rvar > 9) || !reg)
847 				goto sigill;
848 			if (reg & 0x10) {
849 				if (!access_ok
850 				    (VERIFY_READ, addr, 4 * (rvar + 1)))
851 					goto sigbus;
852 			} else {
853 				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
854 					goto sigbus;
855 			}
856 			if (rvar == 9)
857 				rvar = 8;
858 			for (i = 16; rvar; rvar--, i++) {
859 				LoadW(addr, value, res);
860 				if (res)
861 					goto fault;
862 				addr += 4;
863 				regs->regs[i] = value;
864 			}
865 			if ((reg & 0xf) == 9) {
866 				LoadW(addr, value, res);
867 				if (res)
868 					goto fault;
869 				addr += 4;
870 				regs->regs[30] = value;
871 			}
872 			if (reg & 0x10) {
873 				LoadW(addr, value, res);
874 				if (res)
875 					goto fault;
876 				regs->regs[31] = value;
877 			}
878 			goto success;
879 
880 		case mm_swm32_func:
881 			reg = insn.mm_m_format.rd;
882 			rvar = reg & 0xf;
883 			if ((rvar > 9) || !reg)
884 				goto sigill;
885 			if (reg & 0x10) {
886 				if (!access_ok
887 				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
888 					goto sigbus;
889 			} else {
890 				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
891 					goto sigbus;
892 			}
893 			if (rvar == 9)
894 				rvar = 8;
895 			for (i = 16; rvar; rvar--, i++) {
896 				value = regs->regs[i];
897 				StoreW(addr, value, res);
898 				if (res)
899 					goto fault;
900 				addr += 4;
901 			}
902 			if ((reg & 0xf) == 9) {
903 				value = regs->regs[30];
904 				StoreW(addr, value, res);
905 				if (res)
906 					goto fault;
907 				addr += 4;
908 			}
909 			if (reg & 0x10) {
910 				value = regs->regs[31];
911 				StoreW(addr, value, res);
912 				if (res)
913 					goto fault;
914 			}
915 			goto success;
916 
917 		case mm_ldm_func:
918 #ifdef CONFIG_64BIT
919 			reg = insn.mm_m_format.rd;
920 			rvar = reg & 0xf;
921 			if ((rvar > 9) || !reg)
922 				goto sigill;
923 			if (reg & 0x10) {
924 				if (!access_ok
925 				    (VERIFY_READ, addr, 8 * (rvar + 1)))
926 					goto sigbus;
927 			} else {
928 				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
929 					goto sigbus;
930 			}
931 			if (rvar == 9)
932 				rvar = 8;
933 
934 			for (i = 16; rvar; rvar--, i++) {
935 				LoadDW(addr, value, res);
936 				if (res)
937 					goto fault;
938 				addr += 8;
939 				regs->regs[i] = value;
940 			}
941 			if ((reg & 0xf) == 9) {
942 				LoadDW(addr, value, res);
943 				if (res)
944 					goto fault;
945 				addr += 8;
946 				regs->regs[30] = value;
947 			}
948 			if (reg & 0x10) {
949 				LoadDW(addr, value, res);
950 				if (res)
951 					goto fault;
952 				regs->regs[31] = value;
953 			}
954 			goto success;
955 #endif /* CONFIG_64BIT */
956 
957 			goto sigill;
958 
959 		case mm_sdm_func:
960 #ifdef CONFIG_64BIT
961 			reg = insn.mm_m_format.rd;
962 			rvar = reg & 0xf;
963 			if ((rvar > 9) || !reg)
964 				goto sigill;
965 			if (reg & 0x10) {
966 				if (!access_ok
967 				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
968 					goto sigbus;
969 			} else {
970 				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
971 					goto sigbus;
972 			}
973 			if (rvar == 9)
974 				rvar = 8;
975 
976 			for (i = 16; rvar; rvar--, i++) {
977 				value = regs->regs[i];
978 				StoreDW(addr, value, res);
979 				if (res)
980 					goto fault;
981 				addr += 8;
982 			}
983 			if ((reg & 0xf) == 9) {
984 				value = regs->regs[30];
985 				StoreDW(addr, value, res);
986 				if (res)
987 					goto fault;
988 				addr += 8;
989 			}
990 			if (reg & 0x10) {
991 				value = regs->regs[31];
992 				StoreDW(addr, value, res);
993 				if (res)
994 					goto fault;
995 			}
996 			goto success;
997 #endif /* CONFIG_64BIT */
998 
999 			goto sigill;
1000 
1001 			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
1002 		}
1003 
1004 		goto sigbus;
1005 
1006 	case mm_pool32c_op:
1007 		switch (insn.mm_m_format.func) {
1008 		case mm_lwu_func:
1009 			reg = insn.mm_m_format.rd;
1010 			goto loadWU;
1011 		}
1012 
1013 		/*  LL,SC,LLD,SCD are not serviced */
1014 		goto sigbus;
1015 
1016 	case mm_pool32f_op:
1017 		switch (insn.mm_x_format.func) {
1018 		case mm_lwxc1_func:
1019 		case mm_swxc1_func:
1020 		case mm_ldxc1_func:
1021 		case mm_sdxc1_func:
1022 			goto fpu_emul;
1023 		}
1024 
1025 		goto sigbus;
1026 
1027 	case mm_ldc132_op:
1028 	case mm_sdc132_op:
1029 	case mm_lwc132_op:
1030 	case mm_swc132_op:
1031 fpu_emul:
1032 		/* roll back jump/branch */
1033 		regs->cp0_epc = origpc;
1034 		regs->regs[31] = orig31;
1035 
1036 		die_if_kernel("Unaligned FP access in kernel code", regs);
1037 		BUG_ON(!used_math());
1038 		BUG_ON(!is_fpu_owner());
1039 
1040 		lose_fpu(1);	/* save the FPU state for the emulator */
1041 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1042 					       &fault_addr);
1043 		own_fpu(1);	/* restore FPU state */
1044 
1045 		/* If something went wrong, signal */
1046 		process_fpemu_return(res, fault_addr);
1047 
1048 		if (res == 0)
1049 			goto success;
1050 		return;
1051 
1052 	case mm_lh32_op:
1053 		reg = insn.mm_i_format.rt;
1054 		goto loadHW;
1055 
1056 	case mm_lhu32_op:
1057 		reg = insn.mm_i_format.rt;
1058 		goto loadHWU;
1059 
1060 	case mm_lw32_op:
1061 		reg = insn.mm_i_format.rt;
1062 		goto loadW;
1063 
1064 	case mm_sh32_op:
1065 		reg = insn.mm_i_format.rt;
1066 		goto storeHW;
1067 
1068 	case mm_sw32_op:
1069 		reg = insn.mm_i_format.rt;
1070 		goto storeW;
1071 
1072 	case mm_ld32_op:
1073 		reg = insn.mm_i_format.rt;
1074 		goto loadDW;
1075 
1076 	case mm_sd32_op:
1077 		reg = insn.mm_i_format.rt;
1078 		goto storeDW;
1079 
1080 	case mm_pool16c_op:
1081 		switch (insn.mm16_m_format.func) {
1082 		case mm_lwm16_op:
1083 			reg = insn.mm16_m_format.rlist;
1084 			rvar = reg + 1;
1085 			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1086 				goto sigbus;
1087 
1088 			for (i = 16; rvar; rvar--, i++) {
1089 				LoadW(addr, value, res);
1090 				if (res)
1091 					goto fault;
1092 				addr += 4;
1093 				regs->regs[i] = value;
1094 			}
1095 			LoadW(addr, value, res);
1096 			if (res)
1097 				goto fault;
1098 			regs->regs[31] = value;
1099 
1100 			goto success;
1101 
1102 		case mm_swm16_op:
1103 			reg = insn.mm16_m_format.rlist;
1104 			rvar = reg + 1;
1105 			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1106 				goto sigbus;
1107 
1108 			for (i = 16; rvar; rvar--, i++) {
1109 				value = regs->regs[i];
1110 				StoreW(addr, value, res);
1111 				if (res)
1112 					goto fault;
1113 				addr += 4;
1114 			}
1115 			value = regs->regs[31];
1116 			StoreW(addr, value, res);
1117 			if (res)
1118 				goto fault;
1119 
1120 			goto success;
1121 
1122 		}
1123 
1124 		goto sigbus;
1125 
1126 	case mm_lhu16_op:
1127 		reg = reg16to32[insn.mm16_rb_format.rt];
1128 		goto loadHWU;
1129 
1130 	case mm_lw16_op:
1131 		reg = reg16to32[insn.mm16_rb_format.rt];
1132 		goto loadW;
1133 
1134 	case mm_sh16_op:
1135 		reg = reg16to32st[insn.mm16_rb_format.rt];
1136 		goto storeHW;
1137 
1138 	case mm_sw16_op:
1139 		reg = reg16to32st[insn.mm16_rb_format.rt];
1140 		goto storeW;
1141 
1142 	case mm_lwsp16_op:
1143 		reg = insn.mm16_r5_format.rt;
1144 		goto loadW;
1145 
1146 	case mm_swsp16_op:
1147 		reg = insn.mm16_r5_format.rt;
1148 		goto storeW;
1149 
1150 	case mm_lwgp16_op:
1151 		reg = reg16to32[insn.mm16_r3_format.rt];
1152 		goto loadW;
1153 
1154 	default:
1155 		goto sigill;
1156 	}
1157 
1158 loadHW:
1159 	if (!access_ok(VERIFY_READ, addr, 2))
1160 		goto sigbus;
1161 
1162 	LoadHW(addr, value, res);
1163 	if (res)
1164 		goto fault;
1165 	regs->regs[reg] = value;
1166 	goto success;
1167 
1168 loadHWU:
1169 	if (!access_ok(VERIFY_READ, addr, 2))
1170 		goto sigbus;
1171 
1172 	LoadHWU(addr, value, res);
1173 	if (res)
1174 		goto fault;
1175 	regs->regs[reg] = value;
1176 	goto success;
1177 
1178 loadW:
1179 	if (!access_ok(VERIFY_READ, addr, 4))
1180 		goto sigbus;
1181 
1182 	LoadW(addr, value, res);
1183 	if (res)
1184 		goto fault;
1185 	regs->regs[reg] = value;
1186 	goto success;
1187 
1188 loadWU:
1189 #ifdef CONFIG_64BIT
1190 	/*
1191 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1192 	 * if we're on a 32-bit processor and an i-cache incoherency
1193 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1194 	 * would blow up, so for now we don't handle unaligned 64-bit
1195 	 * instructions on 32-bit kernels.
1196 	 */
1197 	if (!access_ok(VERIFY_READ, addr, 4))
1198 		goto sigbus;
1199 
1200 	LoadWU(addr, value, res);
1201 	if (res)
1202 		goto fault;
1203 	regs->regs[reg] = value;
1204 	goto success;
1205 #endif /* CONFIG_64BIT */
1206 
1207 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1208 	goto sigill;
1209 
1210 loadDW:
1211 #ifdef CONFIG_64BIT
1212 	/*
1213 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1214 	 * if we're on a 32-bit processor and an i-cache incoherency
1215 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1216 	 * would blow up, so for now we don't handle unaligned 64-bit
1217 	 * instructions on 32-bit kernels.
1218 	 */
1219 	if (!access_ok(VERIFY_READ, addr, 8))
1220 		goto sigbus;
1221 
1222 	LoadDW(addr, value, res);
1223 	if (res)
1224 		goto fault;
1225 	regs->regs[reg] = value;
1226 	goto success;
1227 #endif /* CONFIG_64BIT */
1228 
1229 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1230 	goto sigill;
1231 
1232 storeHW:
1233 	if (!access_ok(VERIFY_WRITE, addr, 2))
1234 		goto sigbus;
1235 
1236 	value = regs->regs[reg];
1237 	StoreHW(addr, value, res);
1238 	if (res)
1239 		goto fault;
1240 	goto success;
1241 
1242 storeW:
1243 	if (!access_ok(VERIFY_WRITE, addr, 4))
1244 		goto sigbus;
1245 
1246 	value = regs->regs[reg];
1247 	StoreW(addr, value, res);
1248 	if (res)
1249 		goto fault;
1250 	goto success;
1251 
1252 storeDW:
1253 #ifdef CONFIG_64BIT
1254 	/*
1255 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1256 	 * if we're on a 32-bit processor and an i-cache incoherency
1257 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1258 	 * would blow up, so for now we don't handle unaligned 64-bit
1259 	 * instructions on 32-bit kernels.
1260 	 */
1261 	if (!access_ok(VERIFY_WRITE, addr, 8))
1262 		goto sigbus;
1263 
1264 	value = regs->regs[reg];
1265 	StoreDW(addr, value, res);
1266 	if (res)
1267 		goto fault;
1268 	goto success;
1269 #endif /* CONFIG_64BIT */
1270 
1271 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1272 	goto sigill;
1273 
1274 success:
1275 	regs->cp0_epc = contpc;	/* advance or branch */
1276 
1277 #ifdef CONFIG_DEBUG_FS
1278 	unaligned_instructions++;
1279 #endif
1280 	return;
1281 
1282 fault:
1283 	/* roll back jump/branch */
1284 	regs->cp0_epc = origpc;
1285 	regs->regs[31] = orig31;
1286 	/* Did we have an exception handler installed? */
1287 	if (fixup_exception(regs))
1288 		return;
1289 
1290 	die_if_kernel("Unhandled kernel unaligned access", regs);
1291 	force_sig(SIGSEGV, current);
1292 
1293 	return;
1294 
1295 sigbus:
1296 	die_if_kernel("Unhandled kernel unaligned access", regs);
1297 	force_sig(SIGBUS, current);
1298 
1299 	return;
1300 
1301 sigill:
1302 	die_if_kernel
1303 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1304 	force_sig(SIGILL, current);
1305 }
1306 
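/*
 * Handle an unaligned access taken in MIPS16e mode.  An EXTEND prefix is
 * skipped over, and when the access was made from a jump delay slot the
 * instruction to emulate is fetched from after the (16- or 32-bit) jump.
 */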
1307 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1308 {
1309 	unsigned long value;
1310 	unsigned int res;
1311 	int reg;
1312 	unsigned long orig31;
1313 	u16 __user *pc16;
1314 	unsigned long origpc;
1315 	union mips16e_instruction mips16inst, oldinst;
1316 
1317 	origpc = regs->cp0_epc;
1318 	orig31 = regs->regs[31];
1319 	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1320 	/*
1321 	 * This load never faults.
1322 	 */
1323 	__get_user(mips16inst.full, pc16);
1324 	oldinst = mips16inst;
1325 
1326 	/* skip EXTEND instruction */
1327 	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1328 		pc16++;
1329 		__get_user(mips16inst.full, pc16);
1330 	} else if (delay_slot(regs)) {
1331 		/*  skip jump instructions */
1332 		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
1333 		if (mips16inst.ri.opcode == MIPS16e_jal_op)
1334 			pc16++;
1335 		pc16++;
1336 		if (get_user(mips16inst.full, pc16))
1337 			goto sigbus;
1338 	}
1339 
1340 	switch (mips16inst.ri.opcode) {
1341 	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
1342 		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
1343 		case MIPS16e_ldpc_func:
1344 		case MIPS16e_ldsp_func:
1345 			reg = reg16to32[mips16inst.ri64.ry];
1346 			goto loadDW;
1347 
1348 		case MIPS16e_sdsp_func:
1349 			reg = reg16to32[mips16inst.ri64.ry];
1350 			goto writeDW;
1351 
1352 		case MIPS16e_sdrasp_func:
1353 			reg = 29;	/* GPRSP */
1354 			goto writeDW;
1355 		}
1356 
1357 		goto sigbus;
1358 
1359 	case MIPS16e_swsp_op:
1360 	case MIPS16e_lwpc_op:
1361 	case MIPS16e_lwsp_op:
1362 		reg = reg16to32[mips16inst.ri.rx];
1363 		break;
1364 
1365 	case MIPS16e_i8_op:
1366 		if (mips16inst.i8.func != MIPS16e_swrasp_func)
1367 			goto sigbus;
1368 		reg = 29;	/* GPRSP */
1369 		break;
1370 
1371 	default:
1372 		reg = reg16to32[mips16inst.rri.ry];
1373 		break;
1374 	}
1375 
1376 	switch (mips16inst.ri.opcode) {
1377 
1378 	case MIPS16e_lb_op:
1379 	case MIPS16e_lbu_op:
1380 	case MIPS16e_sb_op:
1381 		goto sigbus;
1382 
1383 	case MIPS16e_lh_op:
1384 		if (!access_ok(VERIFY_READ, addr, 2))
1385 			goto sigbus;
1386 
1387 		LoadHW(addr, value, res);
1388 		if (res)
1389 			goto fault;
1390 		MIPS16e_compute_return_epc(regs, &oldinst);
1391 		regs->regs[reg] = value;
1392 		break;
1393 
1394 	case MIPS16e_lhu_op:
1395 		if (!access_ok(VERIFY_READ, addr, 2))
1396 			goto sigbus;
1397 
1398 		LoadHWU(addr, value, res);
1399 		if (res)
1400 			goto fault;
1401 		MIPS16e_compute_return_epc(regs, &oldinst);
1402 		regs->regs[reg] = value;
1403 		break;
1404 
1405 	case MIPS16e_lw_op:
1406 	case MIPS16e_lwpc_op:
1407 	case MIPS16e_lwsp_op:
1408 		if (!access_ok(VERIFY_READ, addr, 4))
1409 			goto sigbus;
1410 
1411 		LoadW(addr, value, res);
1412 		if (res)
1413 			goto fault;
1414 		MIPS16e_compute_return_epc(regs, &oldinst);
1415 		regs->regs[reg] = value;
1416 		break;
1417 
1418 	case MIPS16e_lwu_op:
1419 #ifdef CONFIG_64BIT
1420 		/*
1421 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1422 		 * if we're on a 32-bit processor and an i-cache incoherency
1423 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1424 		 * would blow up, so for now we don't handle unaligned 64-bit
1425 		 * instructions on 32-bit kernels.
1426 		 */
1427 		if (!access_ok(VERIFY_READ, addr, 4))
1428 			goto sigbus;
1429 
1430 		LoadWU(addr, value, res);
1431 		if (res)
1432 			goto fault;
1433 		MIPS16e_compute_return_epc(regs, &oldinst);
1434 		regs->regs[reg] = value;
1435 		break;
1436 #endif /* CONFIG_64BIT */
1437 
1438 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1439 		goto sigill;
1440 
1441 	case MIPS16e_ld_op:
1442 loadDW:
1443 #ifdef CONFIG_64BIT
1444 		/*
1445 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1446 		 * if we're on a 32-bit processor and an i-cache incoherency
1447 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1448 		 * would blow up, so for now we don't handle unaligned 64-bit
1449 		 * instructions on 32-bit kernels.
1450 		 */
1451 		if (!access_ok(VERIFY_READ, addr, 8))
1452 			goto sigbus;
1453 
1454 		LoadDW(addr, value, res);
1455 		if (res)
1456 			goto fault;
1457 		MIPS16e_compute_return_epc(regs, &oldinst);
1458 		regs->regs[reg] = value;
1459 		break;
1460 #endif /* CONFIG_64BIT */
1461 
1462 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1463 		goto sigill;
1464 
1465 	case MIPS16e_sh_op:
1466 		if (!access_ok(VERIFY_WRITE, addr, 2))
1467 			goto sigbus;
1468 
1469 		MIPS16e_compute_return_epc(regs, &oldinst);
1470 		value = regs->regs[reg];
1471 		StoreHW(addr, value, res);
1472 		if (res)
1473 			goto fault;
1474 		break;
1475 
1476 	case MIPS16e_sw_op:
1477 	case MIPS16e_swsp_op:
1478 	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
1479 		if (!access_ok(VERIFY_WRITE, addr, 4))
1480 			goto sigbus;
1481 
1482 		MIPS16e_compute_return_epc(regs, &oldinst);
1483 		value = regs->regs[reg];
1484 		StoreW(addr, value, res);
1485 		if (res)
1486 			goto fault;
1487 		break;
1488 
1489 	case MIPS16e_sd_op:
1490 writeDW:
1491 #ifdef CONFIG_64BIT
1492 		/*
1493 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1494 		 * if we're on a 32-bit processor and an i-cache incoherency
1495 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1496 		 * would blow up, so for now we don't handle unaligned 64-bit
1497 		 * instructions on 32-bit kernels.
1498 		 */
1499 		if (!access_ok(VERIFY_WRITE, addr, 8))
1500 			goto sigbus;
1501 
1502 		MIPS16e_compute_return_epc(regs, &oldinst);
1503 		value = regs->regs[reg];
1504 		StoreDW(addr, value, res);
1505 		if (res)
1506 			goto fault;
1507 		break;
1508 #endif /* CONFIG_64BIT */
1509 
1510 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1511 		goto sigill;
1512 
1513 	default:
1514 		/*
1515 		 * Pheeee...  We encountered an as yet unknown instruction or
1516 		 * cache coherence problem.  Die sucker, die ...
1517 		 */
1518 		goto sigill;
1519 	}
1520 
1521 #ifdef CONFIG_DEBUG_FS
1522 	unaligned_instructions++;
1523 #endif
1524 
1525 	return;
1526 
1527 fault:
1528 	/* roll back jump/branch */
1529 	regs->cp0_epc = origpc;
1530 	regs->regs[31] = orig31;
1531 	/* Did we have an exception handler installed? */
1532 	if (fixup_exception(regs))
1533 		return;
1534 
1535 	die_if_kernel("Unhandled kernel unaligned access", regs);
1536 	force_sig(SIGSEGV, current);
1537 
1538 	return;
1539 
1540 sigbus:
1541 	die_if_kernel("Unhandled kernel unaligned access", regs);
1542 	force_sig(SIGBUS, current);
1543 
1544 	return;
1545 
1546 sigill:
1547 	die_if_kernel
1548 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1549 	force_sig(SIGILL, current);
1550 }
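
/*
 * do_ade() is the C entry point for the address error exception.  After
 * checking the per-task TIF_FIXADE flag and the global unaligned_action
 * policy it dispatches to the classic MIPS, microMIPS or MIPS16e
 * emulator according to the ISA mode encoded in the faulting EPC.
 */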
1551 asmlinkage void do_ade(struct pt_regs *regs)
1552 {
1553 	unsigned int __user *pc;
1554 	mm_segment_t seg;
1555 
1556 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1557 			1, regs, regs->cp0_badvaddr);
1558 	/*
1559 	 * Did we catch a fault trying to load an instruction?
1560 	 */
1561 	if (regs->cp0_badvaddr == regs->cp0_epc)
1562 		goto sigbus;
1563 
1564 	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1565 		goto sigbus;
1566 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1567 		goto sigbus;
1568 
1569 	/*
1570 	 * Do branch emulation only if we didn't forward the exception.
1571 	 * This is all rather ugly ...
1572 	 */
1573 
1574 	/*
1575 	 * Are we running in microMIPS mode?
1576 	 */
1577 	if (get_isa16_mode(regs->cp0_epc)) {
1578 		/*
1579 		 * Did we catch a fault trying to load an instruction in
1580 		 * 16-bit mode?
1581 		 */
1582 		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1583 			goto sigbus;
1584 		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1585 			show_registers(regs);
1586 
1587 		if (cpu_has_mmips) {
1588 			seg = get_fs();
1589 			if (!user_mode(regs))
1590 				set_fs(KERNEL_DS);
1591 			emulate_load_store_microMIPS(regs,
1592 				(void __user *)regs->cp0_badvaddr);
1593 			set_fs(seg);
1594 
1595 			return;
1596 		}
1597 
1598 		if (cpu_has_mips16) {
1599 			seg = get_fs();
1600 			if (!user_mode(regs))
1601 				set_fs(KERNEL_DS);
1602 			emulate_load_store_MIPS16e(regs,
1603 				(void __user *)regs->cp0_badvaddr);
1604 			set_fs(seg);
1605 
1606 			return;
1607 		}
1608 
1609 		goto sigbus;
1610 	}
1611 
1612 	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1613 		show_registers(regs);
1614 	pc = (unsigned int __user *)exception_epc(regs);
1615 
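	/*
	 * The emulation helpers access the faulting address through the
	 * user-access macros above.  If the unaligned access came from
	 * kernel mode, temporarily widen the address limit so those
	 * accesses are permitted, then restore the previous segment.
	 */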
1616 	seg = get_fs();
1617 	if (!user_mode(regs))
1618 		set_fs(KERNEL_DS);
1619 	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1620 	set_fs(seg);
1621 
1622 	return;
1623 
1624 sigbus:
1625 	die_if_kernel("Kernel unaligned instruction access", regs);
1626 	force_sig(SIGBUS, current);
1627 
1628 	/*
1629 	 * XXX On return from the signal handler we should advance the epc
1630 	 */
1631 }
1632 
1633 #ifdef CONFIG_DEBUG_FS
1634 extern struct dentry *mips_debugfs_dir;
1635 static int __init debugfs_unaligned(void)
1636 {
1637 	struct dentry *d;
1638 
1639 	if (!mips_debugfs_dir)
1640 		return -ENODEV;
1641 	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
1642 			       mips_debugfs_dir, &unaligned_instructions);
1643 	if (!d)
1644 		return -ENOMEM;
1645 	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1646 			       mips_debugfs_dir, &unaligned_action);
1647 	if (!d)
1648 		return -ENOMEM;
1649 	return 0;
1650 }
1651 __initcall(debugfs_unaligned);
1652 #endif
1653